Publications of Henry Pfister, Department of Mathematics, Duke University

%% Papers Published   
@article{fds360667,
   Author = {Rengaswamy, N and Seshadreesan, KP and Guha, S and Pfister,
             H},
   Title = {A Belief Propagation-based Quantum Joint-Detection Receiver
             for Superadditive Optical Communications},
   Journal = {2021 Conference on Lasers and Electro Optics, Cleo 2021
             Proceedings},
   Year = {2021},
   Month = {May},
   ISBN = {9781943580910},
   Abstract = {We design a quantum joint-detection receiver for
             binary-phase-shift-keyed optical communications using belief
             propagation with quantum messages. For an exemplary tree
             code, the receiver attains the block-Helstrom limit in
             discriminating the codewords and achieves superadditive
             capacity.},
   Key = {fds360667}
}

@article{fds360085,
   Author = {Rengaswamy, N and Seshadreesan, KP and Guha, S and Pfister,
             H},
   Title = {A belief propagation-based quantum joint-detection receiver
             for superadditive optical communications},
   Journal = {Optics Infobase Conference Papers},
   Year = {2021},
   Month = {January},
   ISBN = {9781557528209},
   Abstract = {We design a quantum joint-detection receiver for
             binary-phase-shift-keyed optical communications using belief
             propagation with quantum messages. For an exemplary tree
             code, the receiver attains the block-Helstrom limit in
             discriminating the codewords and achieves superadditive
             capacity.},
   Key = {fds360085}
}

@article{fds319341,
   Author = {Kumar, S and Young, AJ and Macris, N and Pfister,
             HD},
   Title = {A Proof of Threshold Saturation for Spatially-Coupled LDPC
             Codes on BMS Channels},
   Volume = {abs/1301.6111},
   Year = {2013},
   Key = {fds319341}
}

@article{fds319347,
   Author = {Kumar, S and Young, AJ and Macris, N and Pfister,
             HD},
   Title = {A proof of threshold saturation for spatially-coupled LDPC
             codes on BMS channels},
   Journal = {2012 50th Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2012},
   Pages = {176-184},
   Publisher = {IEEE},
   Year = {2012},
   Month = {December},
   ISBN = {9781467345385},
   url = {http://dx.doi.org/10.1109/Allerton.2012.6483215},
   Abstract = {Low-density parity-check (LDPC) convolutional codes have
             been shown to exhibit excellent performance under
             low-complexity belief-propagation decoding [1], [2]. This
             phenomenon is now termed threshold saturation via spatial
             coupling. The underlying principle behind this appears to be
             very general and spatially-coupled (SC) codes have been
             successfully applied in numerous areas. Recently, SC regular
             LDPC codes have been proven to achieve capacity universally,
             over the class of binary memoryless symmetric (BMS)
             channels, under belief-propagation decoding [3], [4]. In
             [5], [6], potential functions are used to prove that the BP
             threshold of SC irregular LDPC ensembles saturates, for the
             binary erasure channel, to the conjectured MAP threshold
             (known as the Maxwell threshold) of the underlying irregular
             ensembles. In this paper, that proof technique is
             generalized to BMS channels, thereby extending some results
             of [4] to irregular LDPC ensembles. We also believe that
             this approach can be expanded to cover a wide class of
             graphical models whose message-passing rules are associated
             with a Bethe free energy. © 2012 IEEE.},
   Doi = {10.1109/Allerton.2012.6483215},
   Key = {fds319347}
}

@article{fds319371,
   Author = {Nguyen, PS and Pfister, HD and Narayanan, KR},
   Title = {A rate-distortion exponent approach to multiple decoding
             attempts for Reed-Solomon codes},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Pages = {1095-1099},
   Year = {2010},
   Month = {August},
   ISBN = {9781424469604},
   url = {http://dx.doi.org/10.1109/ISIT.2010.5513703},
   Abstract = {Algorithms based on multiple decoding attempts of
             Reed-Solomon (RS) codes have recently attracted new
             attention. Choosing decoding candidates based on
             rate-distortion theory, as proposed previously by the
             authors, currently provides the best performance-versus-complexity
             trade-off. In this paper, an analysis based on the
             rate-distortion exponent is used to directly minimize the
             exponential decay rate of the error probability. This
             enables rigorous bounds on the error probability for
             finite-length RS codes and leads to modest performance
             gains. As a byproduct, a numerical method is derived that
             computes the rate-distortion exponent for independent
             non-identical sources. Analytical results are given for
             errors/erasures decoding. © 2010 IEEE.},
   Doi = {10.1109/ISIT.2010.5513703},
   Key = {fds319371}
}

@article{fds319377,
   Author = {Nguyen, PS and Pfister, HD and Narayanan, KR},
   Title = {A rate-distortion exponent approach to multiple decoding
             attempts for Reed-Solomon codes.},
   Journal = {Isit},
   Pages = {1095-1099},
   Publisher = {IEEE},
   Year = {2010},
   url = {http://dx.doi.org/10.1109/ISIT.2010.5513703},
   Doi = {10.1109/ISIT.2010.5513703},
   Key = {fds319377}
}

@article{fds319381,
   Author = {Nguyen, PS and Pfister, HD and Narayanan, KR},
   Title = {A rate-distortion perspective on multiple decoding attempts
             for Reed-Solomon codes},
   Journal = {2009 47th Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2009},
   Pages = {1235-1242},
   Year = {2009},
   Month = {December},
   ISBN = {9781424458714},
   url = {http://dx.doi.org/10.1109/ALLERTON.2009.5394533},
   Abstract = {Recently, a number of authors have proposed decoding schemes
             for Reed-Solomon (RS) codes based on multiple trials of a
             simple RS decoding algorithm. In this paper, we present a
             rate-distortion (R-D) approach to analyze these
             multiple-decoding algorithms for RS codes. This approach is
             first used to understand the asymptotic performance-versus-complexity
             trade-off of multiple error-and-erasure decoding of RS
             codes. By defining an appropriate distortion measure between
             an error pattern and an erasure pattern, the condition for a
             single error-and-erasure decoding to succeed reduces to a
             form where the distortion is compared to a fixed threshold.
             Finding the best set of erasure patterns for multiple
             decoding trials then turns out to be a covering problem
             which can be solved asymptotically by rate-distortion
             theory. Next, this approach is extended to analyze multiple
             algebraic soft-decision (ASD) decoding of RS codes. Both
             analytical and numerical computations of the R-D functions
             for the corresponding distortion measures are discussed.
             Simulation results show that proposed algorithms using this
             approach perform better than other algorithms with the same
             complexity. ©2009 IEEE.},
   Doi = {10.1109/ALLERTON.2009.5394533},
   Key = {fds319381}
}
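
For context on the two entries above: a single errors-and-erasures decoding attempt of an (n, k) Reed-Solomon code, with minimum distance d_min = n - k + 1, succeeds exactly when the standard bounded-distance condition below holds, and the rate-distortion analysis treats the choice of erasure patterns as a covering problem against this threshold. The notation here is the standard one, not quoted from the papers:

   \[ 2e + s < d_{\min} = n - k + 1, \]

where e is the number of symbol errors outside the erased positions and s is the number of erased positions.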

@article{fds319324,
   Author = {Yedla, A and Jian, YY and Nguyen, PS and Pfister,
             HD},
   Title = {A simple proof of maxwell saturation for coupled scalar
             recursions},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {60},
   Number = {11},
   Pages = {6943-6965},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2014},
   Month = {January},
   url = {http://dx.doi.org/10.1109/TIT.2014.2352296},
   Abstract = {Low-density parity-check (LDPC) convolutional codes (or
             spatially coupled codes) were recently shown to approach
             capacity on the binary erasure channel (BEC) and
             binary-input memoryless symmetric channels. The mechanism
             behind this spectacular performance is now called threshold
             saturation via spatial coupling. This new phenomenon is
             characterized by the belief-propagation threshold of the
             spatially coupled ensemble increasing to an intrinsic noise
             threshold defined by the uncoupled system. In this paper, we
             present a simple proof of threshold saturation that applies
             to a wide class of coupled scalar recursions. Our approach
             is based on constructing potential functions for both the
             coupled and uncoupled recursions. Our results actually show
             that the fixed point of the coupled recursion is essentially
             determined by the minimum of the uncoupled potential
             function and we refer to this phenomenon as Maxwell
             saturation. A variety of examples are considered including
             the density-evolution equations for: irregular LDPC codes on
             the BEC, irregular low-density generator-matrix codes on the
             BEC, a class of generalized LDPC codes with BCH component
             codes, the joint iterative decoding of LDPC codes on
             intersymbol-interference channels with erasure noise, and
             the compressed sensing of random vectors with independent
             identically distributed components.},
   Doi = {10.1109/TIT.2014.2352296},
   Key = {fds319324}
}
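
As a rough sketch of the potential-function machinery referenced above (following the general form used for coupled scalar recursions, up to sign and normalization conventions, so treat it as an assumption rather than a quotation): for a scalar recursion x^{(t+1)} = f(g(x^{(t)}); eps), one defines

   \[ F(x;\varepsilon) = \int_0^x f(z;\varepsilon)\,dz, \qquad G(x) = \int_0^x g(z)\,dz, \qquad
      U_s(x;\varepsilon) = x\,g(x) - G(x) - F\big(g(x);\varepsilon\big), \]

and the saturation statement is controlled by the location and sign of the minimum of the single-system potential U_s.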

@article{fds319342,
   Author = {Yedla, A and Jian, YY and Nguyen, PS and Pfister,
             HD},
   Title = {A simple proof of threshold saturation for coupled scalar
             recursions},
   Journal = {International Symposium on Turbo Codes and Iterative
             Information Processing, Istc},
   Pages = {51-55},
   Year = {2012},
   Month = {December},
   ISBN = {9781457721151},
   url = {http://dx.doi.org/10.1109/ISTC.2012.6325197},
   Abstract = {Low-density parity-check (LDPC) convolutional codes (or
             spatially-coupled codes) have been shown to approach
             capacity on the binary erasure channel (BEC) and
             binary-input memoryless symmetric channels. The mechanism
             behind this spectacular performance is the threshold
             saturation phenomenon, which is characterized by the
             belief-propagation threshold of the spatially-coupled
             ensemble increasing to an intrinsic noise threshold defined
             by the uncoupled system. In this paper, we present a simple
             proof of threshold saturation that applies to a broad class
             of coupled scalar recursions. The conditions of the theorem
             are verified for the density-evolution (DE) equations of
             irregular LDPC codes on the BEC, a class of generalized LDPC
             codes, and the joint iterative decoding of LDPC codes on
             intersymbol-interference channels with erasure noise. Our
             approach is based on potential functions and was motivated
             mainly by the ideas of Takeuchi et al. The resulting proof
             is surprisingly simple when compared to previous methods. ©
             2012 IEEE.},
   Doi = {10.1109/ISTC.2012.6325197},
   Key = {fds319342}
}

@article{fds319344,
   Author = {Yedla, A and Jian, YY and Nguyen, PS and Pfister,
             HD},
   Title = {A simple proof of threshold saturation for coupled vector
             recursions},
   Journal = {2012 Ieee Information Theory Workshop, Itw
             2012},
   Pages = {25-29},
   Publisher = {IEEE},
   Year = {2012},
   Month = {December},
   ISBN = {9781467302234},
   url = {http://dx.doi.org/10.1109/ITW.2012.6404671},
   Abstract = {Convolutional low-density parity-check (LDPC) codes (or
             spatially-coupled codes) have now been shown to achieve
             capacity on binary-input memoryless symmetric channels. The
             principle behind this surprising result is the
             threshold-saturation phenomenon, which is defined by the
             belief-propagation threshold of the spatially-coupled
             ensemble saturating to a fundamental threshold defined by
             the uncoupled system. Previously, the authors demonstrated
             that potential functions can be used to provide a simple
             proof of threshold saturation for coupled scalar recursions.
             In this paper, we present a simple proof of threshold
             saturation that applies to a wide class of coupled vector
             recursions. The conditions of the theorem are verified for
             the density-evolution equations of: (i) joint decoding of
             irregular LDPC codes for a Slepian-Wolf problem with
             erasures, (ii) joint decoding of irregular LDPC codes on an
             erasure multiple-access channel, and (iii) admissible
             protograph codes on the BEC. This proves threshold
             saturation for these systems. © 2012 IEEE.},
   Doi = {10.1109/ITW.2012.6404671},
   Key = {fds319344}
}

@article{fds319354,
   Author = {Yedla, A and Jian, Y-Y and Nguyen, PS and Pfister,
             HD},
   Title = {A simple proof of threshold saturation for coupled vector
             recursions.},
   Journal = {Itw},
   Pages = {25-29},
   Publisher = {IEEE},
   Year = {2012},
   ISBN = {978-1-4673-0224-1},
   url = {http://dx.doi.org/10.1109/ITW.2012.6404671},
   Doi = {10.1109/ITW.2012.6404671},
   Key = {fds319354}
}

@article{fds324463,
   Author = {Sabag, O and Permuter, HH and Pfister, HD},
   Title = {A single-letter upper bound on the feedback capacity of
             unifilar finite-state channels},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {63},
   Number = {3},
   Pages = {1392-1409},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2017},
   Month = {March},
   url = {http://dx.doi.org/10.1109/TIT.2016.2636851},
   Abstract = {An upper bound on the feedback capacity of unifilar
             finite-state channels (FSCs) is derived. A new technique,
             called the Q-context mapping, is based on a construction of
             a directed graph that is used for a sequential quantization
             of the receiver's output sequences to a finite set of
             contexts. For any choice of Q-graph, the feedback capacity
             is bounded by a single-letter expression, Cfb ≤ sup I (X,
             S; Y|Q), where the supremum is over p(x|s, q) and the
             distribution of (S, Q) is their stationary distribution. It
             is shown that the bound is tight for all unifilar FSCs,
             where feedback capacity is known: channels where the state
             is a function of the outputs, the trapdoor channel, Ising
             channels, the no-consecutive-ones input-constrained erasure
             channel, and the memoryless channel. Its efficiency is also
             demonstrated by deriving a new capacity result for the
             dicode erasure channel; the upper bound is obtained directly
             from the above-mentioned expression and its tightness is
             concluded with a general sufficient condition on the
             optimality of the upper bound. This sufficient condition is
             based on a fixed point principle of the BCJR equation and,
             indeed, formulated as a simple lower bound on feedback
             capacity of unifilar FSCs for arbitrary Q-graphs. This upper
             bound indicates that a single-letter expression might exist
             for the capacity of finite-state channels with or without
             feedback based on a construction of auxiliary random
             variable with specified structure, such as the Q-graph, and
             not with i.i.d distribution. The upper bound also serves as
             a non-trivial bound on the capacity of channels without
             feedback, a problem that is still open.},
   Doi = {10.1109/TIT.2016.2636851},
   Key = {fds324463}
}
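
Restating the single-letter bound from the abstract above as a display, in the abstract's own notation: for any choice of Q-graph,

   \[ C_{\mathrm{fb}} \;\le\; \sup_{p(x\mid s,q)} I(X, S; Y \mid Q), \]

where the pair (S, Q) is taken under its stationary distribution.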

@article{fds322712,
   Author = {Sabag, O and Permuter, HH and Pfister, HD},
   Title = {A single-letter upper bound on the feedback capacity of
             unifilar finite-state channels},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2016-August},
   Pages = {310-314},
   Publisher = {IEEE},
   Year = {2016},
   Month = {August},
   ISBN = {9781509018062},
   url = {http://dx.doi.org/10.1109/ISIT.2016.7541311},
   Abstract = {A single-letter upper bound on the feedback capacity of a
             unifilar finite-state channel is derived. The upper bound is
             tight for all cases where the feedback capacity is known.
             Its efficiency is also demonstrated by direct application of
             the bound on the dicode erasure channel, which results in a
             new capacity result. The bound is based on a new technique,
             called the Q-contexts mapping, where the channel outputs are
             recursively quantized to a finite set, called the contexts
             set.},
   Doi = {10.1109/ISIT.2016.7541311},
   Key = {fds322712}
}

@article{fds319392,
   Author = {Pfister, HD and Sason, I},
   Title = {Accumulate-repeat-accumulate codes: Capacity-achieving
             ensembles of systematic codes for the erasure channel with
             bounded complexity},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {53},
   Number = {6},
   Pages = {2088-2115},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2007},
   Month = {June},
   url = {http://dx.doi.org/10.1109/TIT.2007.896873},
   Abstract = {This paper introduces ensembles of systematic
              accumulate-repeat-accumulate (ARA) codes which asymptotically
             achieve capacity on the binary erasure channel (BEC) with
             bounded complexity, per information bit, of encoding and
             decoding. It also introduces symmetry properties which play
             a central role in the construction of new capacity-achieving
             ensembles for the BEC. The results here improve on the
             tradeoff between performance and complexity provided by
             previous constructions of capacity-achieving code ensembles
             defined on graphs. The superiority of ARA codes with
             moderate to large block length is exemplified by computer
             simulations which compare their performance with those of
             previously reported capacity-achieving ensembles of
             low-density parity-check (LDPC) and irregular
              repeat-accumulate (IRA) codes. ARA codes also have the
             advantage of being systematic. © 2007 IEEE.},
   Doi = {10.1109/TIT.2007.896873},
   Key = {fds319392}
}

@article{fds319401,
   Author = {Pfister, HD and Sason, I},
   Title = {Accumulate-repeat-accumulate codes: Systematic codes
             achieving the binary erasure channel capacity with bounded
             complexity},
   Journal = {43rd Annual Allerton Conference on Communication, Control
             and Computing 2005},
   Volume = {1},
   Pages = {30-44},
   Year = {2005},
   Month = {January},
   ISBN = {9781604234916},
   Abstract = {The paper introduces ensembles of accumulate-repeat-accumulate
             (ARA) codes which asymptotically achieve capacity on the
             binary erasure channel (BEC) with bounded complexity per
             information bit. It also introduces symmetry properties
             which play a central role in the construction of
             capacity-achieving ensembles for the BEC. The results here
             improve on the tradeoff between performance and complexity
             provided by the first capacity-achieving ensembles of
             irregular repeat-accumulate (IRA) codes with bounded
             complexity per information bit; these IRA ensembles were
             previously constructed by Pfister, Sason and Urbanke. The
             superiority of ARA codes with moderate to large block length
             is exemplified by computer simulations which compare their
             performance with those of previously reported
             capacity-achieving ensembles of LDPC and IRA codes. The ARA
             codes also have the advantage of being systematic.},
   Key = {fds319401}
}

@article{fds364348,
   Author = {Brandsen, S and Lian, M and Stubbs, KD and Rengaswamy, N and Pfister,
             HD},
   Title = {Adaptive procedures for discriminating between arbitrary
             tensor-product quantum states},
   Journal = {Physical Review A},
   Volume = {106},
   Number = {1},
   Year = {2022},
   Month = {July},
   url = {http://dx.doi.org/10.1103/PhysRevA.106.012408},
   Abstract = {Discriminating between quantum states is a fundamental task
             in quantum information theory. Given two quantum states ρ+
             and ρ-, the Helstrom measurement distinguishes between them
             with minimal probability of error. However, finding and
             experimentally implementing the Helstrom measurement can be
             challenging for quantum states on many qubits. Due to this
             difficulty, there is great interest in identifying local
             measurement schemes which are close to optimal. In the first
             part of this work, we generalize previous work by Acin et
              al. [Phys. Rev. A 71, 032338 (2005); doi:10.1103/PhysRevA.71.032338]
             and show that a locally greedy scheme using Bayesian
             updating can optimally distinguish between any two states
             that can be written as a tensor product of arbitrary pure
             states. We then show that the same algorithm cannot
             distinguish tensor products of mixed states with vanishing
             error probability (even in a large subsystem limit), and
             introduce a modified locally greedy scheme with strictly
             better performance. In the second part of this work, we
             compare these simple local schemes with a general dynamic
             programming approach which finds both the optimal series of
             local measurements as well as the optimal order in which
             subsystems are measured.},
   Doi = {10.1103/PhysRevA.106.012408},
   Key = {fds364348}
}
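
For reference, the Helstrom limit invoked in the two entries above is the standard minimum error probability for discriminating rho_+ and rho_- with equal priors (a textbook identity, not specific to these papers):

   \[ P_{\mathrm{err}}^{\min} \;=\; \tfrac{1}{2}\Big(1 - \tfrac{1}{2}\,\big\|\rho_+ - \rho_-\big\|_1\Big), \]

where the norm is the trace norm; with unequal priors p_+ and p_-, the difference is replaced by p_+ rho_+ - p_- rho_-.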

@article{fds352259,
   Author = {Brandsen, S and Lian, M and Stubbs, KD and Rengaswamy, N and Pfister,
             HD},
   Title = {Adaptive Procedures for Discriminating Between Arbitrary
             Tensor-Product Quantum States},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2020-June},
   Pages = {1933-1938},
   Year = {2020},
   Month = {June},
   url = {http://dx.doi.org/10.1109/ISIT44484.2020.9174234},
   Abstract = {Discriminating between quantum states is a fundamental task
             in quantum information theory. Given two quantum states, ρ+
             and ρ-, the Helstrom measurement distinguishes between them
             with minimal probability of error. However, finding and
             experimentally implementing the Helstrom measurement can be
             challenging for quantum states on many qubits. Due to this
              difficulty, there is great interest in identifying local
             measurement schemes which are close to optimal. In the first
             part of this work, we generalize previous work by Acin et
             al. (Phys. Rev. A 71, 032338) and show that a locally greedy
             (LG) scheme using Bayesian updating can optimally
             distinguish between any two states that can be written as a
             tensor product of arbitrary pure states. We then show that
             the same algorithm cannot distinguish tensor products of
             mixed states with vanishing error probability (even in a
             large subsystem limit), and introduce a modified
             locally-greedy (MLG) scheme with strictly better
             performance. In the second part of this work, we compare
             these simple local schemes with a general dynamic
             programming (DP) approach. The DP approach finds the optimal
             series of local measurements and optimal order of subsystem
             measurement to distinguish between the two tensor-product
              states.},
   Doi = {10.1109/ISIT44484.2020.9174234},
   Key = {fds352259}
}

@article{fds347672,
   Author = {Luo, Y and Pfister, H},
   Title = {Adversarial Defense of Image Classification Using a
             Variational Auto-Encoder},
   Volume = {abs/1812.02891},
   Year = {2018},
   Month = {December},
   Abstract = {Deep neural networks are known to be vulnerable to
             adversarial attacks. This exposes them to potential exploits
             in security-sensitive applications and highlights their lack
             of robustness. This paper uses a variational auto-encoder
             (VAE) to defend against adversarial attacks for image
             classification tasks. This VAE defense has a few nice
             properties: (1) it is quite flexible and its use of
             randomness makes it harder to attack; (2) it can learn
             disentangled representations that prevent blurry
             reconstruction; and (3) a patch-wise VAE defense strategy is
             used that does not require retraining for different size
             images. For moderate to severe attacks, this system
             outperforms or closely matches the performance of JPEG
             compression, with the best quality parameter. It also has
             more flexibility and potential for improvement via
             training.},
   Key = {fds347672}
}

@article{fds363942,
   Author = {Coskun, MC and Pfister, HD},
   Title = {An Information-Theoretic Perspective on Successive
             Cancellation List Decoding and Polar Code
             Design},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {68},
   Number = {9},
   Pages = {5779-5791},
   Year = {2022},
   Month = {September},
   url = {http://dx.doi.org/10.1109/TIT.2022.3173152},
   Abstract = {This work identifies information-theoretic quantities that
             are closely related to the required list size on average for
             successive cancellation list (SCL) decoding to implement
             maximum-likelihood decoding over general binary memoryless
             symmetric (BMS) channels. It also provides upper and lower
             bounds for these quantities that can be computed efficiently
             for very long codes. For the binary erasure channel (BEC),
             we provide a simple method to estimate the mean accurately
             via density evolution. The analysis shows how to modify,
             e.g., Reed-Muller codes, to improve the performance when
              practical list sizes, e.g., $L \in [8, 1024]$, are
             adopted. Exemplary constructions with block lengths $N\in
             \{128,512\}$ outperform polar codes of 5G over the
             binary-input additive white Gaussian noise channel. It is
             further shown that there is a concentration around the mean
             of the logarithm of the required list size for sufficiently
             large block lengths, over discrete-output BMS channels. We
             provide the probability mass functions (p.m.f.s) of this
             logarithm, over the BEC, for a sequence of the modified RM
             codes with an increasing block length via simulations, which
             illustrate that the p.m.f.s concentrate around the estimated
             mean.},
   Doi = {10.1109/TIT.2022.3173152},
   Key = {fds363942}
}

@article{fds319328,
   Author = {Pfister, HD and Narayanan, KR},
   Title = {An introduction to spatially-coupled codes via practical
             examples},
   Journal = {2014 31st Ursi General Assembly and Scientific Symposium,
             Ursi Gass 2014},
   Publisher = {IEEE},
   Year = {2014},
   Month = {October},
   ISBN = {9781467352253},
   url = {http://dx.doi.org/10.1109/URSIGASS.2014.6929240},
   Abstract = {This paper uses examples to illustrate the ongoing
             transition in spatially-coupled coding from theory to
             practice. In the first example, we find that
             spatially-coupled codes have already been implemented by two
             different companies for 100 Gb/s optical communications.
             Both systems use variants of braided block codes, which were
             introduced even before the term spatial coupling. In the
             second example, we discuss the multiple-access channel and
             observe that some gains from spatial coupling were being
             realized in cellular base stations as early as 2007.
             Finally, we discuss some open questions regarding
             spatially-coupled codes.},
   Doi = {10.1109/URSIGASS.2014.6929240},
   Key = {fds319328}
}

@article{fds319361,
   Author = {Kim, BH and Pfister, HD},
   Title = {An iterative joint linear-programming decoding of LDPC codes
             and finite-state channels},
   Journal = {Ieee International Conference on Communications},
   Publisher = {IEEE},
   Year = {2011},
   Month = {September},
   ISBN = {9781612842332},
   url = {http://dx.doi.org/10.1109/icc.2011.5962814},
   Abstract = {In this paper, we introduce an efficient method for the
             joint linear-programming (LP) decoding of low-density
             parity-check (LDPC) codes and finite-state channels (FSCs).
             In particular, we extend the approach of iterative
             approximate LP decoding proposed by Vontobel and Koetter and
             further analyzed by Burshtein. By taking advantage of the
             dual-domain structure of the joint decoding LP, we obtain a
             convergent iterative algorithm for joint decoding. Our
             intent was to find an algorithm very similar to BCJR-based
             turbo equalization (TE) and it seems that we have succeeded
             in this respect. The result is an iterative solver for the
             joint decoding LP whose complexity is similar to TE but
             whose performance is similar to joint LP decoding. The main
             advantage of this decoder is that it appears to provide the
             predictability of joint LP decoding with the computational
             complexity of TE. © 2011 IEEE.},
   Doi = {10.1109/icc.2011.5962814},
   Key = {fds319361}
}

@article{fds319375,
   Author = {Kim, B-H and Pfister, HD},
   Title = {An Iterative Joint Linear-Programming Decoding of LDPC Codes
             and Finite-State Channels},
   Volume = {abs/1009.4352},
   Year = {2010},
   Key = {fds319375}
}

@article{fds319360,
   Author = {Zhang, F and Pfister, HD},
   Title = {Analysis of verification-based decoding on the q-ary
             symmetric channel for large q},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {57},
   Number = {10},
   Pages = {6754-6770},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2011},
   Month = {October},
   url = {http://dx.doi.org/10.1109/TIT.2011.2165813},
   Abstract = {A new verification-based message-passing decoder for
             low-density parity-check (LDPC) codes is introduced and
             analyzed for the q-ary symmetric channel (q-SC). Rather than
             passing messages consisting of symbol probabilities, this
             decoder passes lists of possible symbols and marks some
             lists as verified. The density evolution (DE) equations for
             this decoder are derived and used to compute decoding
             thresholds. If the maximum list size is unbounded, then one
             finds that any capacity-achieving LDPC code for the binary
             erasure channel can be used to achieve capacity on the q -SC
             for large q. The decoding thresholds are also computed via
             DE for the case where each list is truncated to satisfy a
             maximum list-size constraint. Simulation results are also
             presented to confirm the DE results. During the simulations,
             we observed differences between two verification-based
             decoding algorithms, introduced by Luby and Mitzenmacher,
             that were implicitly assumed to be identical. In this paper,
             the node-based algorithms are evaluated via analysis and
             simulation. The probability of false verification (FV) is
             also considered and techniques are discussed to mitigate the
             FV. Optimization of the degree distribution is also used to
             improve the threshold for a fixed maximum list size.
             Finally, the proposed algorithm is compared with a variety
             of other algorithms using both density evolution thresholds
             and simulation results. © 2011 IEEE.},
   Doi = {10.1109/TIT.2011.2165813},
   Key = {fds319360}
}
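
A sanity check on the large-q claim in the abstract above: the capacity of the q-ary symmetric channel with symbol-error probability p is the standard expression below, and normalized by log q it approaches 1 - p, the capacity of an erasure channel with erasure rate p (standard fact, stated here only for orientation):

   \[ C_{q\text{-SC}} \;=\; \log q - H(p) - p\,\log(q-1) \;=\; (1-p)\log q + O(1) \quad \text{as } q \to \infty. \]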

@article{fds327403,
   Author = {Jian, YY and Pfister, HD and Narayanan, KR},
   Title = {Approaching Capacity at High Rates with Iterative
             Hard-Decision Decoding},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {63},
   Number = {9},
   Pages = {5752-5773},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2017},
   Month = {September},
   url = {http://dx.doi.org/10.1109/TIT.2017.2717838},
   Abstract = {A variety of low-density parity-check (LDPC) ensembles have
             now been observed to approach capacity with message-passing
             decoding. However, all of them use soft (i.e., non-binary)
             messages and a posteriori probability decoding of their
             component codes. In this paper, we show that one can
             approach capacity at high rates using iterative
             hard-decision decoding (HDD) of generalized product codes.
             Specifically, a class of spatially coupled generalized LDPC
              codes with Bose-Chaudhuri-Hocquenghem component codes is
             considered, and it is observed that, in the high-rate
             regime, they can approach capacity under the proposed
             iterative HDD. These codes can be seen as generalized
             product codes and are closely related to braided block
             codes. An iterative HDD algorithm is proposed that enables
             one to analyze the performance of these codes via density
             evolution.},
   Doi = {10.1109/TIT.2017.2717838},
   Key = {fds327403}
}
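
To make the row/column iteration behind iterative hard-decision decoding concrete, here is a minimal toy sketch in Python: a product of two (7,4) Hamming component codes with single-error bounded-distance decoding, alternating row and column passes. It only illustrates the decoding schedule; the codes studied in the entry above are spatially coupled generalized LDPC codes with BCH components, and all names below are invented for the example.

import numpy as np

# Systematic (7,4) Hamming code: G = [I | P], H = [P^T | I]  (toy component code).
P = np.array([[1, 1, 0],
              [1, 0, 1],
              [0, 1, 1],
              [1, 1, 1]])
G = np.hstack([np.eye(4, dtype=int), P])          # 4x7 generator
H = np.hstack([P.T, np.eye(3, dtype=int)])        # 3x7 parity-check

def bdd(word):
    """Bounded-distance (single-error) decoding of one length-7 word."""
    s = H.dot(word) % 2
    if s.any():
        # Flip the position whose H-column equals the syndrome
        # (this miscorrects when a word holds more than one error).
        j = next(k for k in range(7) if np.array_equal(H[:, k], s))
        word = word.copy()
        word[j] ^= 1
    return word

def encode_product(info):
    """Encode a 4x4 info array into a 7x7 product codeword (rows, then columns)."""
    rows = info.dot(G) % 2                 # encode the 4 info rows -> 4x7
    return (rows.T.dot(G) % 2).T           # encode all 7 columns  -> 7x7

def decode_product(received, iters=5):
    """Iterative hard-decision decoding: alternate row and column BDD passes."""
    est = received.copy()
    for _ in range(iters):
        est = np.array([bdd(r) for r in est])        # row pass
        est = np.array([bdd(c) for c in est.T]).T    # column pass
    return est

rng = np.random.default_rng(0)
info = rng.integers(0, 2, size=(4, 4))
codeword = encode_product(info)

noisy = codeword.copy()
for (i, j) in [(0, 2), (3, 3), (5, 6)]:              # inject a few bit errors
    noisy[i, j] ^= 1

print("errors after decoding:", int((decode_product(noisy) != codeword).sum()))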

@article{fds319349,
   Author = {Jian, YY and Pfister, HD and Narayanan, KR},
   Title = {Approaching capacity at high rates with iterative
             hard-decision decoding},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Pages = {2696-2700},
   Publisher = {IEEE},
   Year = {2012},
   Month = {October},
   ISBN = {9781467325790},
   url = {http://dx.doi.org/10.1109/ISIT.2012.6284009},
   Abstract = {A variety of low-density parity-check (LDPC) ensembles have
             now been observed to approach capacity with message-passing
             decoding. However, all of them use soft (i.e., non-binary)
             messages and a posteriori probability (APP) decoding of
             their component codes. In this paper, we analyze a class of
             spatially-coupled generalized LDPC codes and observe that,
             in the high-rate regime, they can approach capacity under
             iterative hard-decision decoding. These codes can be seen as
             generalized product codes and are closely related to braided
             block codes. © 2012 IEEE.},
   Doi = {10.1109/ISIT.2012.6284009},
   Key = {fds319349}
}

@article{fds319352,
   Author = {Jian, Y-Y and Pfister, HD and Narayanan, KR},
   Title = {Approaching capacity at high rates with iterative
             hard-decision decoding.},
   Journal = {Isit},
   Pages = {2696-2700},
   Publisher = {IEEE},
   Year = {2012},
   ISBN = {978-1-4673-2580-6},
   url = {http://dx.doi.org/10.1109/ISIT.2012.6284009},
   Doi = {10.1109/ISIT.2012.6284009},
   Key = {fds319352}
}

@article{fds343757,
   Author = {Häger, C and Pfister, HD},
   Title = {Approaching Miscorrection-free Performance of Product and
             Generalized Product Codes},
   Volume = {abs/1711.07805},
   Year = {2017},
   Month = {November},
   Abstract = {Product codes (PCs) protect a two-dimensional array of bits
             using short component codes. Assuming transmission over the
             binary symmetric channel, the decoding is commonly performed
             by iteratively applying bounded-distance decoding to the
             component codes. For this coding scheme, undetected errors
             in the component decoding-also known as miscorrections-significantly
             degrade the performance. In this paper, we propose a novel
             iterative decoding algorithm for PCs which can detect and
             avoid most miscorrections. The algorithm can also be used to
             decode many recently proposed classes of generalized PCs
             such as staircase, braided, and half-product codes.
             Depending on the component code parameters, our algorithm
             significantly outperforms the conventional iterative
             decoding method. As an example, for double-error-correcting
             Bose-Chaudhuri-Hocquenghem component codes, the net coding
             gain can be increased by up to 0.4 dB. Moreover, the error
             floor can be lowered by orders of magnitude, up to the point
             where the decoder performs virtually identical to a
             genie-aided decoder that avoids all miscorrections. We also
             discuss post-processing techniques that can be used to
             reduce the error floor even further.},
   Key = {fds343757}
}

@article{fds336001,
   Author = {Hager, C and Pfister, HD},
   Title = {Approaching Miscorrection-Free Performance of Product Codes
             with Anchor Decoding},
   Journal = {Ieee Transactions on Communications},
   Volume = {66},
   Number = {7},
   Pages = {2797-2808},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2018},
   Month = {July},
   url = {http://dx.doi.org/10.1109/TCOMM.2018.2816073},
   Abstract = {Product codes (PCs) protect a 2-D array of bits using short
             component codes. Assuming transmission over the binary
             symmetric channel, the decoding is commonly performed by
             iteratively applying bounded-distance decoding to the
             component codes. For this coding scheme, undetected errors
             in the component decoding - also known as miscorrections -
             significantly degrade the performance. In this paper, we
             propose a novel iterative decoding algorithm for PCs which
             can detect and avoid most miscorrections. The algorithm can
             also be used to decode many recently proposed classes of
             generalized PCs, such as staircase, braided, and
             half-product codes. Depending on the component code
             parameters, our algorithm significantly outperforms the
             conventional iterative decoding method. As an example, for
             double-error-correcting Bose-Chaudhuri-Hocquenghem component
             codes, the net coding gain can be increased by up to 0.4 dB.
             Moreover, the error floor can be lowered by orders of
             magnitude, up to the point where the decoder performs
             virtually identical to a genie-aided decoder that avoids all
             miscorrections. We also discuss post-processing techniques
             that can be used to reduce the error floor even
             further.},
   Doi = {10.1109/TCOMM.2018.2816073},
   Key = {fds336001}
}

@article{fds342384,
   Author = {Fougstedt, C and Häger, C and Svensson, L and Pfister, HD and Larsson-Edefors, P},
   Title = {ASIC Implementation of Time-Domain Digital Backpropagation
             with Deep-Learned Chromatic Dispersion Filters},
   Journal = {European Conference on Optical Communication,
             Ecoc},
   Volume = {2018-September},
   Year = {2018},
   Month = {November},
   url = {http://dx.doi.org/10.1109/ECOC.2018.8535430},
   Abstract = {We consider time-domain digital backpropagation with
             chromatic dispersion filters jointly optimized and quantized
             using machine-learning techniques. Compared to the baseline
             implementations, we show improved BER performance and >40%
             power dissipation reductions in 28-nm CMOS.},
   Doi = {10.1109/ECOC.2018.8535430},
   Key = {fds342384}
}

@article{fds349208,
   Author = {Rengaswamy, N and Seshadreesan, KP and Guha, S and Pfister,
             HD},
   Title = {Belief propagation with quantum messages for
             quantum-enhanced classical communications},
   Journal = {Npj Quantum Information},
   Volume = {7},
   Number = {1},
   Year = {2021},
   Month = {December},
   url = {http://dx.doi.org/10.1038/s41534-021-00422-1},
   Abstract = {For space-based laser communications, when the mean photon
             number per received optical pulse is much smaller than one,
             there is a large gap between communications capacity
             achievable with a receiver that performs individual
             pulse-by-pulse detection, and the quantum-optimal
             “joint-detection receiver” that acts collectively on
             long codeword-blocks of modulated pulses; an effect often
             termed “superadditive capacity”. In this paper, we
             consider the simplest scenario where a large superadditive
             capacity is known: a pure-loss channel with a coherent-state
             binary phase-shift keyed (BPSK) modulation. The two BPSK
             states can be mapped conceptually to two non-orthogonal
             states of a qubit, described by an inner product that is a
             function of the mean photon number per pulse. Using this
             map, we derive an explicit construction of the quantum
             circuit of a joint-detection receiver based on a recent idea
             of “belief-propagation with quantum messages” (BPQM). We
             quantify its performance improvement over the Dolinar
             receiver that performs optimal pulse-by-pulse detection,
             which represents the best “classical” approach. We
             analyze the scheme rigorously and show that it achieves the
             quantum limit of minimum average error probability in
             discriminating 8 (BPSK) codewords of a length-5 binary
             linear code with a tree factor graph. Our result suggests
             that a BPQM receiver might attain the Holevo capacity of
             this BPSK-modulated pure-loss channel. Moreover, our
             receiver circuit provides an alternative proposal for a
             quantum supremacy experiment, targeted at a specific
             application that can potentially be implemented on a small,
             special-purpose, photonic quantum computer capable of
             performing cat-basis universal qubit logic.},
   Doi = {10.1038/s41534-021-00422-1},
   Key = {fds349208}
}
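
For orientation, the qubit mapping mentioned in the abstract above rests on the standard overlap of the two BPSK coherent states (a textbook identity, not specific to this paper): with mean photon number N = |alpha|^2 per pulse,

   \[ \langle -\alpha \,|\, \alpha \rangle \;=\; e^{-2|\alpha|^2} \;=\; e^{-2\bar N}, \]

so the two modulation symbols behave like non-orthogonal pure qubit states whose inner product is set by the mean photon number.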

@article{fds368933,
   Author = {Brandsen, S and Mandal, A and Pfister, HD},
   Title = {Belief Propagation with Quantum Messages for Symmetric
             Classical-Quantum Channels},
   Journal = {2022 Ieee Information Theory Workshop, Itw
             2022},
   Pages = {494-499},
   Year = {2022},
   Month = {January},
   ISBN = {9781665483414},
   url = {http://dx.doi.org/10.1109/ITW54588.2022.9965841},
   Abstract = {Belief propagation (BP) is a classical algorithm that
             approximates the marginal distribution associated with a
             factor graph by passing messages between adjacent nodes in
             the graph. It gained popularity in the 1990's as a powerful
             decoding algorithm for LDPC codes. In 2016, Renes introduced
             a belief propagation with quantum messages (BPQM) and
             described how it could be used to decode classical codes
             defined by tree factor graphs that are sent over the
             classical-quantum pure-state channel. In this work, we
             propose an extension of BPQM to general binary-input
             symmetric classical-quantum (BSCQ) channels based on the
             implementation of a symmetric paired measurement. While this
             new paired-measurement BPQM (PMBPQM) approach is suboptimal
             in general, it provides a concrete BPQM decoder that can be
             implemented with local operations. Finally, we demonstrate
             that density evolution can be used to analyze the
             performance of PMBPQM on tree factor graphs. As an
             application, we compute noise thresholds of some LDPC codes
             with BPQM decoding for a class of BSCQ channels.},
   Doi = {10.1109/ITW54588.2022.9965841},
   Key = {fds368933}
}

@article{fds322717,
   Author = {Lian, M and Pfister, HD},
   Title = {Belief-propagation reconstruction for compressed sensing:
             Quantization vs. Gaussian approximation},
   Journal = {2015 53rd Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2015},
   Pages = {1106-1113},
   Publisher = {IEEE},
   Year = {2016},
   Month = {April},
   ISBN = {9781509018239},
   url = {http://dx.doi.org/10.1109/ALLERTON.2015.7447132},
   Abstract = {This work considers the compressed sensing (CS) of i.i.d.
             signals with sparse measurement matrices and
             belief-propagation (BP) reconstruction. In general, BP
             reconstruction for CS requires the passing of messages that
             are distributions over the real numbers. To implement this
             in practice, one typically uses either quantized
             distributions or a Gaussian approximation. In this work, we
             use density evolution to compare the reconstruction
             performance of these two methods. Since the reconstruction
             performance depends on the signal realization, this analysis
             makes use of a novel change of variables to analyze the
             performance for a typical signal. Simulation results are
             provided to support the results.},
   Doi = {10.1109/ALLERTON.2015.7447132},
   Key = {fds322717}
}

@article{fds322709,
   Author = {Kumar, S and Calderbank, R and Pfister, HD},
   Title = {Beyond double transitivity: Capacity-achieving cyclic codes
             on erasure channels},
   Journal = {2016 Ieee Information Theory Workshop, Itw
             2016},
   Pages = {241-245},
   Publisher = {IEEE},
   Year = {2016},
   Month = {October},
   ISBN = {9781509010905},
   url = {http://dx.doi.org/10.1109/ITW.2016.7606832},
   Abstract = {Recently, sequences of error-correcting codes with
             doubly-transitive permutation groups were shown to achieve
             capacity on erasure channels under symbol-wise maximum a
             posteriori (MAP) decoding. From this, it follows that
             Reed-Muller and primitive narrow-sense BCH codes achieve
             capacity in the same setting. In this article, we extend
             this result to a large family of cyclic codes by considering
             codes whose permutation groups satisfy a condition weaker
             than double transitivity. The article combines two simple
             technical contributions. First, we show that the transition
             width of a monotone boolean function is O(1/log k), where k
             is the size of the smallest orbit induced by its symmetry
             group. The proof is based on Talagrand's lower bound on
             influences for monotone boolean functions. Second, we
             consider the extrinsic information transfer (EXIT) function
             of an Fq-linear cyclic code whose blocklength N divides qt-1
             and is coprime with q-1. We show that this EXIT function is
             a monotone boolean function whose symmetry group contains no
             orbits of size smaller than the smallest prime divisor of t.
             Combining these, we show that sequences of cyclic codes,
             whose blocklengths satisfy the above conditions, achieve
             capacity on the q-ary erasure channel if all prime divisors
             of t tend to infinity.},
   Doi = {10.1109/ITW.2016.7606832},
   Key = {fds322709}
}
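
The EXIT-function argument sketched in the abstract above ultimately rests on the area theorem for the erasure channel; in its standard binary form (stated here for orientation, not quoted from the paper), a linear code of rate R used over a BEC with erasure rate eps has an average MAP EXIT function h satisfying

   \[ \int_0^1 h(\varepsilon)\, d\varepsilon \;=\; R, \]

so once h is shown to have a sharp transition, its threshold is pinned at eps = 1 - R, which is the capacity condition.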

@article{fds343277,
   Author = {Pfister, H and Sason, I and Urbanke, R},
   Title = {Bounds on the decoding complexity of punctured codes on
             graphs},
   Year = {2004},
   Month = {September},
   Abstract = {We present two sequences of ensembles of non-systematic
             irregular repeat-accumulate codes which asymptotically (as
             their block length tends to infinity) achieve capacity on
             the binary erasure channel (BEC) with bounded complexity per
             information bit. This is in contrast to all previous
             constructions of capacity-achieving sequences of ensembles
             whose complexity grows at least like the log of the inverse
             of the gap (in rate) to capacity. The new bounded complexity
             result is achieved by puncturing bits, and allowing in this
             way a sufficient number of state nodes in the Tanner graph
             representing the codes. We also derive an
             information-theoretic lower bound on the decoding complexity
             of randomly punctured codes on graphs. The bound holds for
             every memoryless binary-input output-symmetric channel, and
             is refined for the BEC.},
   Key = {fds343277}
}

@article{fds352873,
   Author = {Coskun, MC and Pfister, HD},
   Title = {Bounds on the List Size of Successive Cancellation List
             Decoding},
   Journal = {Spcom 2020 International Conference on Signal Processing and
             Communications},
   Year = {2020},
   Month = {July},
   ISBN = {9781728188959},
   url = {http://dx.doi.org/10.1109/SPCOM50965.2020.9179593},
   Abstract = {Successive cancellation list decoding of polar codes
             provides very good performance for short to moderate block
             lengths. However, the list size required to approach the
             performance of maximum-likelihood decoding is still not well
             understood theoretically. This work identifies
             information-theoretic quantities that are closely related to
             this required list size. It also provides a natural
             approximation for these quantities that can be computed
             efficiently even for very long codes. Simulation results are
             provided for the binary erasure channel as well as the
             binary-input additive white Gaussian noise
             channel.},
   Doi = {10.1109/SPCOM50965.2020.9179593},
   Key = {fds352873}
}

@article{fds319382,
   Author = {Yedla, A and Pfister, HD and Narayanan, KR},
   Title = {Can iterative decoding for erasure correlated sources be
             universal?},
   Journal = {2009 47th Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2009},
   Pages = {408-415},
   Publisher = {IEEE},
   Year = {2009},
   Month = {December},
   ISBN = {9781424458714},
   url = {http://dx.doi.org/10.1109/ALLERTON.2009.5394794},
   Abstract = {In this paper, we consider a few iterative decoding schemes
             for the joint source-channel coding of correlated sources.
             Specifically, we consider the joint source-channel coding of
             two erasure correlated sources with transmission over
             different erasure channels. Our main interest is in
             determining whether or not various code ensembles can
             achieve the capacity region universally over varying channel
             conditions. We consider two ensembles in the class of
             low-density generator-matrix (LDGM) codes known as
             Luby-Transform (LT) codes and one ensemble of low-density
              parity-check (LDPC) codes. We analyze them using density
             evolution and show that optimized LT codes can achieve the
             extremal symmetric point of the capacity region. We also
             show that LT codes are not universal under iterative
             decoding for this problem because they cannot simultaneously
             achieve the extremal symmetric point and a corner point of
             the capacity region. The sub-universality of iterative
             decoding is characterized by studying the density evolution
             for LT codes. ©2009 IEEE.},
   Doi = {10.1109/ALLERTON.2009.5394794},
   Key = {fds319382}
}

@article{fds319391,
   Author = {Diggavi, S and Mitzenmacher, M and Pfister, HD},
   Title = {Capacity upper bounds for the deletion channel},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Pages = {1716-1720},
   Publisher = {IEEE},
   Year = {2007},
   Month = {December},
   ISBN = {1424414296},
   url = {http://dx.doi.org/10.1109/ISIT.2007.4557469},
   Abstract = {We present two upper bounds on the capacity of the i.i.d.
             binary deletion channel, where each bit is independently
             deleted with a fixed probability d. The first can be
             numerically evaluated for any fixed d. The second provides
              an asymptotic upper bound as d goes to 1. These appear to be
              the first non-trivial upper bounds for this probabilistic
             deletion channel. ©2007 IEEE.},
   Doi = {10.1109/ISIT.2007.4557469},
   Key = {fds319391}
}
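
For context on "non-trivial" in the abstract above: revealing the deletion locations turns the deletion channel into an erasure channel, so the elementary benchmark these bounds improve on is (a standard observation, stated here for orientation)

   \[ C_{\mathrm{del}}(d) \;\le\; C_{\mathrm{erasure}}(d) \;=\; 1 - d. \]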

@article{fds319400,
   Author = {Pfister, HD and Sason, I and Urbanke, R},
   Title = {Capacity-achieving ensembles for the binary erasure channel
             with bounded complexity},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {51},
   Number = {7},
   Pages = {2352-2379},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2005},
   Month = {July},
   url = {http://dx.doi.org/10.1109/TIT.2005.850079},
   Abstract = {We present two sequences of ensembles of non-systematic
             irregular repeat-accumulate (IRA) codes which asymptotically
             (as their block length tends to infinity) achieve capacity
             on the binary erasure channel (BEC) with bounded complexity
             per information bit. This is in contrast to all previous
             constructions of capacity-achieving sequences of ensembles
             whose complexity grows at least like the log of the inverse
             of the gap (in rate) to capacity. The new bounded complexity
             result is achieved by puncturing bits, and allowing in this
             way a sufficient number of state nodes in the Tanner graph
             representing the codes. We derive an information-theoretic
             lower bound on the decoding complexity of randomly punctured
             codes on graphs. The bound holds for every memoryless
             binary-input output-symmetric (MBIOS) channel and is refined
             for the binary erasure channel. © 2005 IEEE.},
   Doi = {10.1109/TIT.2005.850079},
   Key = {fds319400}
}

@article{fds322721,
   Author = {Pfister, H and Sason, I and Urbanke, R},
   Title = {Capacity-achieving ensembles for the binary erasure channel
             with bounded complexity},
   Journal = {Ieee Convention of Electrical and Electronics Engineers in
             Israel, Proceedings},
   Pages = {110-113},
   Publisher = {IEEE},
   Year = {2004},
   Month = {December},
   ISBN = {078038427X},
   url = {http://dx.doi.org/10.1109/eeei.2004.1361101},
   Abstract = {We present two sequences of ensembles of non-systematic
             irregular repeat-accumulate codes which asymptotically (as
             their block length tends to infinity) achieve capacity on
             the binary erasure channel (BEC) with bounded complexity.
             This is in contrast to all previous constructions of
             capacity-achieving sequences of ensembles whose complexity
             grows at least like the log of the inverse of the gap to
             capacity. The new bounded complexity result is achieved by
             allowing a sufficient number of state nodes in the Tanner
             graph representing the codes. ©2004 IEEE.},
   Doi = {10.1109/eeei.2004.1361101},
   Key = {fds322721}
}

@article{fds322722,
   Author = {Pfister, H and Sason, I and Urbanke, R},
   Title = {Capacity-achieving ensembles for the binary erasure channel
             with bounded complexity},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Pages = {207},
   Year = {2004},
   Month = {October},
   Abstract = {We present two sequences of ensembles of non-systematic
             irregular repeat-accumulate codes which asymptotically (as
             their block length tends to infinity) achieve capacity on
             the binary erasure channel (BEC) with bounded complexity.
             This is in contrast to all previous constructions of
             capacity-achieving sequences of ensembles whose complexity
             grows at least like the log of the inverse of the gap to
             capacity. The new bounded complexity result is achieved by
             allowing a sufficient number of state nodes in the Tanner
             graph representing the codes.},
   Key = {fds322722}
}

@article{fds319402,
   Author = {Pfister, HD and Sason, I},
   Title = {Capacity-Achieving Ensembles of Accumulate-Repeat-Accumulate
             Codes for the Erasure Channel with Bounded
             Complexity},
   Volume = {abs/cs/0512006},
   Year = {2005},
   Key = {fds319402}
}

@article{fds319405,
   Author = {Hou, J and Siegel, PH and Milstein, LB and Pfister,
             HD},
   Title = {Capacity-Approaching Bandwidth-Efficient Coded Modulation
             Schemes Based on Low-Density Parity-Check
             Codes},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {49},
   Number = {9},
   Pages = {2141-2155+2322},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2003},
   Month = {September},
   url = {http://dx.doi.org/10.1109/TIT.2003.815777},
   Abstract = {We design multilevel coding (MLC) and bit-interleaved coded
             modulation (BICM) schemes based on low-density parity-check
             (LDPC) codes. The analysis and optimization of the LDPC
             component codes for the MLC and BICM schemes are complicated
             because, in general, the equivalent binary-input component
             channels are not necessarily symmetric. To overcome this
             obstacle, we deploy two different approaches: one based on
             independent and identically distributed (i.i.d.) channel
             adapters and the other based on coset codes. By
             incorporating i.i.d. channel adapters, we can force the
             symmetry of each binary-input component channel. By
             considering coset codes, we extend the concentration theorem
             based on previous work by Richardson et al. and Kavčić et
             al. We also discuss the relation between the systems based
             on the two approaches and show that they indeed have the
             same expected decoder behavior. Next, we jointly optimize
             the code rates and degree distribution pairs of the LDPC
             component codes for the MLC scheme. The optimized irregular
             LDPC codes at each level of MLC with multistage decoding
             (MSD) are able to perform well at signal-to-noise ratios
             (SNR) very close to the capacity of the additive white
             Gaussian noise (AWGN) channel. We also show that the
             optimized BICM scheme can approach the parallel independent
             decoding (PID) capacity as closely as does the MLC/PID
             scheme. Simulations with very large codeword length verify
             the accuracy of the analytical results. Finally, we compare
             the simulated performance of these coded modulation schemes
             at finite codeword lengths, and consider the results from
             the perspective of a random coding exponent
             analysis.},
   Doi = {10.1109/TIT.2003.815777},
   Key = {fds319405}
}

@article{fds349207,
   Author = {Rengaswamy, N and Calderbank, R and Newman, M and Pfister,
             HD},
   Title = {Classical Coding Problem from Transversal T
             Gates},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2020-June},
   Pages = {1891-1896},
   Year = {2020},
   Month = {June},
   url = {http://dx.doi.org/10.1109/ISIT44484.2020.9174408},
   Abstract = {Universal quantum computation requires the implementation of
             a logical non-Clifford gate. In this paper, we characterize
             all stabilizer codes whose code subspaces are preserved
             under physical T and T† gates. For example, this could
             enable magic state distillation with non-CSS codes and,
             thus, provide better parameters than CSS-based protocols.
             However, among non-degenerate stabilizer codes that support
             transversal T, we prove that CSS codes are optimal. We also
             show that triorthogonal codes are, essentially, the only
             family of CSS codes that realize logical transversal T via
             physical transversal T. Using our algebraic approach, we
             reveal new purely-classical coding problems that are
             intimately related to the realization of logical operations
             via transversal T. Decreasing monomial codes are also used
             to construct a code that realizes logical CCZ. Finally, we
             use Ax's theorem to characterize the logical operation
             realized on a family of quantum Reed-Muller codes. This
             result is generalized to finer angle Z-rotations in
             https://arxiv.org/abs/1910.09333.},
   Doi = {10.1109/ISIT44484.2020.9174408},
   Key = {fds349207}
}

@article{fds319336,
   Author = {Yedla, A and Pfister, HD and Narayanan, KR},
   Title = {Code design for the noisy Slepian-Wolf problem},
   Journal = {Ieee Transactions on Communications},
   Volume = {61},
   Number = {6},
   Pages = {2535-2545},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2013},
   Month = {June},
   url = {http://dx.doi.org/10.1109/TCOMM.2013.032713.120002A},
   Abstract = {We consider a noisy Slepian-Wolf problem where two
             correlated sources are separately encoded (using codes of
             fixed rate) and transmitted over two independent binary
             memoryless symmetric channels. The capacity of each channel
             is characterized by a single parameter that is not known at
             the transmitter. System performance is evaluated by
             computing the set of channel parameters for which the system
             can successfully decode. This set is called the achievable
             channel parameter region (ACPR). The goal is to design
             systems whose ACPRs are as large as possible. The main
             result is the design of irregular low-density parity-check
             (LDPC) ensembles whose ACPRs are significantly larger than
             previous designs. Some previous attempts to achieve large
             ACPRs with LDPC codes failed because systematic codes were
             used. In this work, we start with systematic encoders but
             puncture all the systematic bits before transmission. We
             also show that additional gains are possible using a
             staggered structure which enables codes optimized for
             single-user channels to perform well under symmetric channel
             conditions. The main analysis tool is a generic
             density-evolution framework for the analysis of joint
             iterative decoding for this problem.},
   Doi = {10.1109/TCOMM.2013.032713.120002A},
   Key = {fds319336}
}

@article{fds322720,
   Author = {Parag, P and Chamberland, JF and Pfister, H and Narayanan,
             K},
   Title = {Code rate, queueing behavior and the correlated erasure
             channel},
   Journal = {Ieee Information Theory Workshop 2010, Itw
             2010},
   Publisher = {IEEE},
   Year = {2010},
   Month = {July},
   ISBN = {9781424463725},
   url = {http://dx.doi.org/10.1109/ITWKSPS.2010.5503186},
   Abstract = {This paper considers the relationship between code-rate
             selection and queueing performance for communication systems
             with time-varying parameters. While error-correcting codes
             offer protection against channel unreliability, there is a
             tradeoff between the enhanced protection of low-rate codes
             and the increased information transfer of high-rate codes.
             Hence, there exists a natural compromise between
             packet-level error protection and information rate. In the
             limiting regime where codewords are asymptotically long,
             this tradeoff is well-understood and characterized by the
             Shannon capacity. However, for delay-sensitive communication
             systems and finite code-lengths, a complete characterization
             of this tradeoff is still not fully developed. This paper
             offers a new perspective on the queueing performance of
             communication systems with finite block-lengths operating
             over correlated erasure channels. A rigorous framework that
             links code rate to overall system performance for random
             codes is presented. Guidelines for code rate selection in
             delay-sensitive systems are identified.},
   Doi = {10.1109/ITWKSPS.2010.5503186},
   Key = {fds322720}
}

@article{fds319338,
   Author = {Parag, P and Chamberland, JF and Pfister, HD and Narayanan,
             K},
   Title = {Code-rate selection, queueing behavior, and the correlated
             erasure channel},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {59},
   Number = {1},
   Pages = {397-407},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2013},
   Month = {January},
   url = {http://dx.doi.org/10.1109/TIT.2012.2216501},
   Abstract = {This paper considers the relationship between code-rate
             selection and queueing performance for communication systems
             subject to time-varying channel conditions. While
             error-correcting codes offer protection against channel
             uncertainties, there exists a natural tradeoff between the
             enhanced protection of low-rate codes and the rate penalty
             imposed by additional redundancy. In the limiting regime
             where codewords are asymptotically long, this tradeoff is
             well understood and characterized by the Shannon capacity.
             However, for delay-sensitive communication systems and
             finite block lengths, a complete characterization of this
             tradeoff is not fully developed. This paper offers a new
             perspective on the queueing performance of communication
             systems with finite block lengths operating over correlated
             erasure channels. A rigorous framework that links code rate
             to overall system performance for random codes is presented.
             Guidelines for code-rate selection in delay-sensitive
             systems are identified. These findings are supported by a
             numerical study. © 2012 IEEE.},
   Doi = {10.1109/TIT.2012.2216501},
   Key = {fds319338}
}

@article{fds319312,
   Author = {Kudekar, S and Kumar, S and Mondelli, M and Pfister, HD and Urbanke,
             R},
   Title = {Comparing the bit-MAP and block-MAP decoding thresholds of
             reed-muller codes on BMS channels},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2016-August},
   Pages = {1755-1759},
   Publisher = {IEEE},
   Year = {2016},
   Month = {August},
   ISBN = {9781509018062},
   url = {http://dx.doi.org/10.1109/ISIT.2016.7541600},
   Abstract = {The question whether RM codes are capacity-achieving is a
             long-standing open problem in coding theory that was
             recently answered in the affirmative for transmission over
             erasure channels [1], [2]. Remarkably, the proof does not
             rely on specific properties of RM codes, apart from their
             symmetry. Indeed, the main technical result consists in
             showing that any sequence of linear codes, with
             doubly-transitive permutation groups, achieves capacity on
             the memoryless erasure channel under bit-MAP decoding. Thus,
             a natural question is what happens under block-MAP decoding.
             In [1], [2], by exploiting further symmetries of the code,
             the bit-MAP threshold was shown to be sharp enough so that
             the block erasure probability also converges to 0. However,
             this technique relies heavily on the fact that the
             transmission is over an erasure channel. We present an
             alternative approach to strengthen results regarding the
             bit-MAP threshold to block-MAP thresholds. This approach is
             based on a careful analysis of the weight distribution of RM
             codes. In particular, the flavor of the main result is the
             following: assume that the bit-MAP error probability decays
             as N^{-δ}, for some δ > 0. Then, the block-MAP error
             probability also converges to 0. This technique applies to
             transmission over any binary memoryless symmetric channel.
             Thus, it can be thought of as a first step in extending the
             proof that RM codes are capacity-achieving to the general
             case.},
   Doi = {10.1109/ISIT.2016.7541600},
   Key = {fds319312}
}

@article{fds319386,
   Author = {Zhang, F and Pfister, HD},
   Title = {Compressed sensing and linear codes over real
             numbers},
   Journal = {2008 Information Theory and Applications Workshop Conference
             Proceedings, Ita},
   Pages = {558-561},
   Publisher = {IEEE},
   Year = {2008},
   Month = {October},
   ISBN = {1424426707},
   url = {http://dx.doi.org/10.1109/ITA.2008.4601055},
   Abstract = {Compressed sensing (CS) is a relatively new area of signal
             processing and statistics that focuses on signal
             reconstruction from a small number of linear (e.g., dot
             product) measurements. In this paper, we analyze CS using
             tools from coding theory because CS can also be viewed as
             syndrome-based source coding of sparse vectors using linear
             codes over real numbers. While coding theory does not
             typically deal with codes over real numbers, there is
             actually a very close relationship between CS and
             error-correcting codes over large discrete alphabets. This
             connection leads naturally to new reconstruction methods and
             analysis. In some cases, the resulting methods provably
             require many fewer measurements than previous
             approaches.},
   Doi = {10.1109/ITA.2008.4601055},
   Key = {fds319386}
}
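
The abstract above views compressed sensing as syndrome-based source coding of sparse real vectors using linear codes. The Python sketch below is only an illustration of that viewpoint under assumed dimensions (n, m, k) and a random Gaussian "parity-check" matrix; recovery uses generic orthogonal matching pursuit, not the reconstruction methods proposed in the paper.

# Compressed sensing viewed as syndrome decoding of a sparse real vector.
import numpy as np

rng = np.random.default_rng(0)
n, m, k = 64, 32, 3                  # signal length, measurements, sparsity (assumed)
H = rng.standard_normal((m, n))      # random measurement / "parity-check" matrix
x = np.zeros(n)
x[rng.choice(n, k, replace=False)] = rng.standard_normal(k)
s = H @ x                            # measurement ("syndrome") vector

support, r = [], s.copy()
for _ in range(k):                   # greedy support recovery (orthogonal matching pursuit)
    support.append(int(np.argmax(np.abs(H.T @ r))))
    coef, *_ = np.linalg.lstsq(H[:, support], s, rcond=None)
    r = s - H[:, support] @ coef

x_hat = np.zeros(n)
x_hat[support] = coef
print("max reconstruction error:", np.max(np.abs(x_hat - x)))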

@article{fds319388,
   Author = {Zhang, F and Pfister, HD},
   Title = {Compressed Sensing and Linear Codes over Real
             Numbers},
   Journal = {2008 Information Theory and Applications
             Workshop},
   Pages = {278-281},
   Publisher = {IEEE},
   Year = {2008},
   Month = {January},
   ISBN = {978-1-4244-2670-6},
   Key = {fds319388}
}

@article{fds319369,
   Author = {Jian, YY and Pfister, HD},
   Title = {Convergence of weighted min-sum decoding via dynamic
             programming on coupled trees},
   Journal = {6th International Symposium on Turbo Codes and Iterative
             Information Processing, Istc 2010},
   Pages = {487-491},
   Publisher = {IEEE},
   Year = {2010},
   Month = {November},
   ISBN = {9781424467457},
   url = {http://dx.doi.org/10.1109/ISTC.2010.5613901},
   Abstract = {Applying the max-product (and belief-propagation) algorithms
             to loopy graphs is now quite popular for constraint
             satisfaction problems. This is largely due to their low
             computational complexity and impressive performance in
             practice. Still, there is no general understanding of the
             conditions required for convergence and/or the optimality of
             converged solutions. This paper presents an analysis of
             weighted min-sum (a.k.a. attenuated max-product) decoding
             for LDPC codes that guarantees convergence to a fixed point
             when the weight β is sufficiently small. It also shows
             that, if the fixed point satisfies all the constraints, then
             it must be both the linear-programming (LP) and
             maximum-likelihood (ML) solution. For (dv, dc)-regular LDPC
             codes, the weight must satisfy 1/β > dv - 1 whereas the
             result of Koetter and Frey requires instead that 1/β > (dv
             - 1)(dc - 1). A counterexample is also given that shows a
             fixed point might not be the ML solution if 1/β < dv - 1.
             Finally, connections are explored with recent work by Arora
             et al. on the threshold of LP decoding.},
   Doi = {10.1109/ISTC.2010.5613901},
   Key = {fds319369}
}

@article{fds319325,
   Author = {Jian, YY and Pfister, HD},
   Title = {Convergence of weighted min-sum decoding via dynamic
             programming on trees},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {60},
   Number = {2},
   Pages = {943-963},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2014},
   Month = {January},
   url = {http://dx.doi.org/10.1109/TIT.2013.2290303},
   Abstract = {Applying the max-product (and sum-product) algorithms to
             loopy graphs is now quite popular for best assignment
             problems. This is largely due to their low computational
             complexity and impressive performance in practice. Still,
             there is no general understanding of the conditions required
             for convergence or optimality of converged solutions or
             both. This paper presents an analysis of both attenuated
             max-product decoding and weighted min-sum decoding for
             low-density parity-check (LDPC) codes, which guarantees
             convergence to a fixed point when a weight factor β is
             sufficiently small. It also shows that, if the fixed point
             satisfies some consistency conditions, then it must be both
             a linear-programming (LP) and maximum-likelihood (ML)
             decoding solution. For (dv, dc)-regular LDPC codes, the
             weight factor must satisfy 1/β > dv - 1. ©2013 IEEE.},
   Doi = {10.1109/TIT.2013.2290303},
   Key = {fds319325}
}
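
To make the decoder described in the two entries above concrete, here is a minimal sketch of weighted min-sum decoding: each check-to-variable message is attenuated by a weight β before being summed at the variable nodes. The toy code (the (7,4) Hamming code), the weight, and the channel LLRs are assumed for illustration; this is not the authors' implementation.

# Weighted min-sum decoding on a small parity-check matrix.
import numpy as np

H = np.array([[1, 1, 0, 1, 1, 0, 0],
              [1, 0, 1, 1, 0, 1, 0],
              [0, 1, 1, 1, 0, 0, 1]])
beta = 0.5                                        # attenuation weight (assumed)

def weighted_min_sum(llr_ch, H, beta, iters=20):
    m, n = H.shape
    msg_cv = np.zeros((m, n))                     # check-to-variable messages
    for _ in range(iters):
        total = llr_ch + msg_cv.sum(axis=0)
        msg_vc = (total - msg_cv) * H             # extrinsic variable-to-check messages
        for i in range(m):                        # weighted min-sum check-node update
            idx = np.nonzero(H[i])[0]
            v = msg_vc[i, idx]
            sgn = np.sign(v); sgn[sgn == 0] = 1
            absv = np.abs(v)
            for j, t in enumerate(idx):
                msg_cv[i, t] = beta * np.prod(np.delete(sgn, j)) * np.delete(absv, j).min()
        hard = (llr_ch + msg_cv.sum(axis=0)) < 0  # tentative hard decision
        if not np.any((H @ hard) % 2):            # stop if all checks are satisfied
            break
    return hard.astype(int)

llr = np.array([2.1, -0.3, 1.7, 0.9, 1.2, 0.4, 1.5])   # noisy LLRs for the all-zero codeword
print("decoded:", weighted_min_sum(llr, H, beta))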

@article{fds328986,
   Author = {Charbonneau, P and Li, YC and Pfister, HD and Yaida,
             S},
   Title = {Cycle-expansion method for the Lyapunov exponent,
             susceptibility, and higher moments.},
   Journal = {Physical Review. E},
   Volume = {96},
   Number = {3-1},
   Pages = {032129},
   Year = {2017},
   Month = {September},
   url = {http://dx.doi.org/10.1103/physreve.96.032129},
   Abstract = {Lyapunov exponents characterize the chaotic nature of
             dynamical systems by quantifying the growth rate of
             uncertainty associated with the imperfect measurement of
             initial conditions. Finite-time estimates of the exponent,
             however, experience fluctuations due to both the initial
             condition and the stochastic nature of the dynamical path.
             The scale of these fluctuations is governed by the Lyapunov
             susceptibility, the finiteness of which typically provides a
             sufficient condition for the law of large numbers to apply.
             Here, we obtain a formally exact expression for this
             susceptibility in terms of the Ruelle dynamical ζ function
             for one-dimensional systems. We further show that, for
             systems governed by sequences of random matrices, the cycle
             expansion of the ζ function enables systematic computations
             of the Lyapunov susceptibility and of its higher-moment
             generalizations. The method is here applied to a class of
             dynamical models that maps to static disordered spin chains
             with interactions stretching over a varying distance and is
             tested against Monte Carlo simulations.},
   Doi = {10.1103/physreve.96.032129},
   Key = {fds328986}
}
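
For orientation only: the quantity analyzed above is the growth rate of products of random matrices. The sketch below estimates a top Lyapunov exponent by naive Monte Carlo for an assumed 2x2 Gaussian-disorder ensemble; this is the brute-force estimate that the cycle-expansion method of the paper is designed to improve upon, not the method itself.

# Naive Monte Carlo estimate of a top Lyapunov exponent.
import numpy as np

rng = np.random.default_rng(1)

def random_matrix():
    # hypothetical ensemble: identity plus small Gaussian disorder
    return np.eye(2) + 0.3 * rng.standard_normal((2, 2))

T = 100_000
v = np.array([1.0, 0.0])
log_growth = 0.0
for _ in range(T):
    v = random_matrix() @ v
    norm = np.linalg.norm(v)
    log_growth += np.log(norm)
    v /= norm                          # renormalize to avoid overflow

print("estimated Lyapunov exponent:", log_growth / T)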

@article{fds319317,
   Author = {Rengaswamy, N and Pfister, HD},
   Title = {Cyclic polar codes},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2015-June},
   Pages = {1287-1291},
   Publisher = {IEEE},
   Year = {2015},
   Month = {September},
   ISBN = {9781467377041},
   url = {http://dx.doi.org/10.1109/ISIT.2015.7282663},
   Abstract = {Arikan introduced polar codes in 2009 and proved that they
             achieve the symmetric capacity, under low-complexity
             successive cancellation decoding, of any binary-input
             discrete memoryless channel. Arikan's construction is based
             on the Kronecker product of 2-by-2 matrices and it was
             extended to larger matrices by Şaşoǧlu et al. in 2010. In
             this paper, we construct cyclic polar codes based on a
             mixed-radix Cooley-Tukey decomposition of the Galois field
             Fourier transform. Ignoring the twiddle factors between
             stages, the derived fast Fourier transform is essentially a
             Kronecker product of small Fourier transform matrices. Thus,
             one can define a successive cancellation decoder and observe
             that the coordinate channels polarize. Choosing the
             locations of the frozen symbols in the resulting polar code
             is identical to choosing the locations of zeros in the
             Fourier transform of the codewords and, thus, the code is
             cyclic.},
   Doi = {10.1109/ISIT.2015.7282663},
   Key = {fds319317}
}
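
As background for the abstract above, the sketch below illustrates the basic two-by-two polarization recursion for the binary erasure channel (Arikan's original construction, not the cyclic Cooley-Tukey construction of the paper): it tracks the erasure probabilities of the synthetic channels produced by n levels of the Kronecker recursion.

# Erasure probabilities of polarized BEC channels after n recursion levels.
import numpy as np

def polarize_bec(eps, n):
    probs = np.array([eps])
    for _ in range(n):
        worse = 2 * probs - probs**2    # "minus" channel: erased unless both copies survive
        better = probs**2               # "plus" channel: erased only if both copies are erased
        probs = np.concatenate([worse, better])
    return probs

p = polarize_bec(0.5, 10)               # 1024 synthetic channels of BEC(0.5)
print("fraction nearly noiseless:", np.mean(p < 1e-3))
print("fraction nearly useless:  ", np.mean(p > 1 - 1e-3))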

@article{fds337695,
   Author = {Santi, E and Hager, C and Pfister, HD},
   Title = {Decoding Reed-Muller Codes Using Minimum-Weight Parity
             Checks},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2018-June},
   Pages = {1296-1300},
   Publisher = {IEEE},
   Year = {2018},
   Month = {August},
   url = {http://dx.doi.org/10.1109/ISIT.2018.8437637},
   Abstract = {Reed-Muller (RM) codes exhibit good performance under
             maximum-likelihood (ML) decoding due to their
             highly-symmetric structure. In this paper, we explore the
             question of whether the code symmetry of RM codes can also
             be exploited to achieve near-ML performance in practice. The
             main idea is to apply iterative decoding to a
             highly-redundant parity-check (PC) matrix that contains only
             the minimum-weight dual codewords as rows. As examples, we
             consider the peeling decoder for the binary erasure channel,
             linear-programming and belief propagation (BP) decoding for
             the binary-input additive white Gaussian noise channel, and
             bit-flipping and BP decoding for the binary symmetric
             channel. For short block lengths, it is shown that near-ML
             performance can indeed be achieved in many cases. We also
             propose a method to tailor the PC matrix to the received
             observation by selecting only a small fraction of useful
             minimum-weight PCs before decoding begins. This allows one
             to both improve performance and significantly reduce
             complexity compared to using the full set of minimum-weight
             PCs.},
   Doi = {10.1109/ISIT.2018.8437637},
   Key = {fds337695}
}
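
The decoding idea described above, iterative decoding over a redundant parity-check matrix whose rows are minimum-weight dual codewords, can be illustrated on a toy case. The sketch below is an assumed small example, not the paper's implementation: it builds all weight-4 dual codewords of the self-dual code RM(1,3) and runs a peeling decoder on the binary erasure channel.

# Peeling decoding of RM(1,3) on the BEC with a redundant parity-check matrix.
import itertools
import numpy as np

# RM(1,3): evaluations of affine Boolean functions a0 + a1*x + a2*y + a3*z
pts = list(itertools.product([0, 1], repeat=3))
codewords = []
for a0, a1, a2, a3 in itertools.product([0, 1], repeat=4):
    codewords.append([(a0 + a1*x + a2*y + a3*z) % 2 for x, y, z in pts])
C = np.array(codewords)

# redundant parity-check matrix: all weight-4 codewords of the dual (= RM(1,3) itself)
H = C[C.sum(axis=1) == 4]

def peel(y, H):
    y = y.copy()                                   # entries in {0, 1}; -1 marks an erasure
    progress = True
    while progress:
        progress = False
        for row in H:
            idx = np.nonzero(row)[0]
            erased = [i for i in idx if y[i] < 0]
            if len(erased) == 1:                   # check with a single erasure: solve it
                known = [i for i in idx if y[i] >= 0]
                y[erased[0]] = int(sum(y[i] for i in known) % 2)
                progress = True
    return y

tx = C[5]                                          # an arbitrary codeword
rx = tx.copy(); rx[[0, 3, 6]] = -1                 # erase three positions
print("recovered codeword matches:", np.array_equal(peel(rx, H), tx))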

@article{fds352263,
   Author = {Lian, M and Hager, C and Pfister, HD},
   Title = {Decoding Reed-Muller Codes Using Redundant Code
             Constraints},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2020-June},
   Pages = {42-47},
   Year = {2020},
   Month = {June},
   ISBN = {9781728164328},
   url = {http://dx.doi.org/10.1109/ISIT44484.2020.9174087},
   Abstract = {The recursive projection-aggregation (RPA) decoding
             algorithm for Reed-Muller (RM) codes was recently introduced
             by Ye and Abbe. We show that the RPA algorithm is closely
             related to (weighted) belief-propagation (BP) decoding by
             interpreting it as a message-passing algorithm on a factor
             graph with redundant code constraints. We use this
             observation to introduce a novel decoder tailored to
             high-rate RM codes. The new algorithm relies on puncturing
             rather than projections and is called recursive
             puncturing-aggregation (RXA). We also investigate collapsed
             (i.e., non-recursive) versions of RPA and RXA and show some
             examples where they achieve similar performance with lower
             decoding complexity.},
   Doi = {10.1109/ISIT44484.2020.9174087},
   Key = {fds352263}
}

@article{fds337694,
   Author = {Hager, C and Pfister, HD},
   Title = {Deep Learning of the Nonlinear Schrödinger Equation in
             Fiber-Optic Communications},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2018-June},
   Pages = {1590-1594},
   Publisher = {IEEE},
   Year = {2018},
   Month = {August},
   url = {http://dx.doi.org/10.1109/ISIT.2018.8437734},
   Abstract = {An important problem in fiber-optic communications is to
             invert the nonlinear Schrödinger equation in real time to
             reverse the deterministic effects of the channel.
             Interestingly, the popular split-step Fourier method (SSFM)
             leads to a computation graph that is reminiscent of a deep
             neural network. This observation allows one to leverage
             tools from machine learning to reduce complexity. In
             particular, the main disadvantage of the SSFM is that its
             complexity using M steps is at least M times larger than a
             linear equalizer. This is because the linear SSFM operator
             is a dense matrix. In previous work, truncation methods such
             as frequency sampling, wavelets, or least-squares have been
             used to obtain 'cheaper' operators that can be implemented
             using filters. However, a large number of filter taps are
             typically required to limit truncation errors. For example,
             Ip and Kahn showed that for a 10 Gbaud signal and 2000 km
             optical link, a truncated SSFM with 25 steps would require
             70-tap filters in each step and 100 times more operations
             than linear equalization. We find that, by jointly
             optimizing all filters with deep learning, the complexity
             can be reduced significantly for similar accuracy. Using
             optimized 5-tap and 3-tap filters in an alternating fashion,
             one requires only around 2-6 times the complexity of linear
             equalization, depending on the implementation.},
   Doi = {10.1109/ISIT.2018.8437734},
   Key = {fds337694}
}
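
As context for the abstract above, the split-step Fourier method itself is summarized in the sketch below, with assumed pulse and fiber parameters and one common sign convention for the nonlinear Schrödinger equation. This is the reference SSFM whose linear steps the paper replaces with jointly learned short filters, not the learned equalizer.

# Symmetric split-step Fourier method for the nonlinear Schroedinger equation.
import numpy as np

n, steps = 1024, 25                                   # grid size, SSFM steps (assumed)
span, beta2, gamma = 2000e3, -21e-27, 1.3e-3          # fiber length (m), dispersion (s^2/m), nonlinearity (1/W/m)
dz = span / steps
t = np.linspace(-2e-9, 2e-9, n, endpoint=False)       # time grid (s)
w = 2 * np.pi * np.fft.fftfreq(n, t[1] - t[0])        # angular-frequency grid (rad/s)

u = np.exp(-(t / 2e-10) ** 2).astype(complex)         # Gaussian input pulse
half_disp = np.exp(0.25j * beta2 * w ** 2 * dz)       # half-step linear (dispersion) operator

for _ in range(steps):                                # symmetric split step
    u = np.fft.ifft(half_disp * np.fft.fft(u))        # half step of dispersion
    u *= np.exp(1j * gamma * np.abs(u) ** 2 * dz)     # full step of nonlinear phase rotation
    u = np.fft.ifft(half_disp * np.fft.fft(u))        # half step of dispersion

print("output peak power (arb. units):", np.abs(u).max() ** 2)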

@article{fds319318,
   Author = {Hamidi-Sepehr, F and Pfister, HD and Chamberland,
             JF},
   Title = {Delay-Sensitive Communication over Fading Channels: Queueing
             Behavior and Code Parameter Selection},
   Journal = {Ieee Transactions on Vehicular Technology},
   Volume = {64},
   Number = {9},
   Pages = {3957-3970},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2015},
   Month = {September},
   url = {http://dx.doi.org/10.1109/TVT.2014.2365181},
   Abstract = {This paper examines the queueing performance of
             communication systems that transmit encoded data over
             unreliable channels. A fading formulation suitable for
             wireless mobile applications is considered, where errors are
             caused by a discrete channel with correlated behavior over
             time. For carefully selected channel models and arrival
             processes, a tractable Markov structure composed of queue
             length and channel state is identified. This facilitates the
             analysis of the stationary behavior of the system, leading
             to evaluation criteria such as bounds on the probability of
             the queue exceeding a threshold. Specifically, this paper
             focuses on system models with scalable arrival profiles,
             which are based on Poisson processes, and finite-state
             channels with memory. These assumptions permit the rigorous
             comparison of system performance for codes with arbitrary
             block lengths and code rates. Based on the resulting
             characterizations, it is possible to select the best code
             parameters for delay-sensitive applications over various
             channels. Random codes and BCH codes are then employed as a
             means to study the relationship between code parameter
             selection and the queueing performance of point-to-point
             data links. The methodology introduced herein offers a new
             perspective on the joint queueing-coding analysis of
             finite-state channels with memory, and it is supported by
             numerical simulations.},
   Doi = {10.1109/TVT.2014.2365181},
   Key = {fds319318}
}

@article{fds322715,
   Author = {Hager, C and Pfister, HD and Amat, AG and Brannstrom,
             F},
   Title = {Density evolution and error floor analysis for staircase and
             braided codes},
   Journal = {2016 Optical Fiber Communications Conference and Exhibition,
             Ofc 2016},
   Year = {2016},
   Month = {August},
   ISBN = {9781943580071},
   url = {http://dx.doi.org/10.1364/ofc.2016.th2a.42},
   Abstract = {We analyze deterministically constructed (i.e.,
             non-ensemble-based) codes in the waterfall and error floor
             region. The analysis directly applies to several FEC classes
             proposed for high-speed OTNs such as staircase and braided
             codes.},
   Doi = {10.1364/ofc.2016.th2a.42},
   Key = {fds322715}
}

@article{fds326794,
   Author = {Häger, C and Pfister, HD and Graell Amat, A and Brännström,
             F},
   Title = {Density evolution for deterministic generalized product
             codes on the binary erasure channel at high
             rates},
   Journal = {Ieee Transactions on Information Theory},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2017},
   Month = {July},
   url = {http://dx.doi.org/10.1109/TIT.2017.2689783},
   Abstract = {Generalized product codes (GPCs) are extensions of product
             codes (PCs), where code symbols are protected by two
             component codes but not necessarily arranged in a
             rectangular array. We consider a deterministic construction
             of GPCs (as opposed to randomized code ensembles) and
             analyze the asymptotic performance over the binary erasure
             channel under iterative decoding. Our code construction
             encompasses several classes of GPCs previously proposed in
             the literature, such as irregular PCs, blockwise braided
             codes, and staircase codes. It is assumed that the component
             codes can correct a fixed number of erasures and that the
             length of each component code tends to infinity. We show
             that this setup is equivalent to studying the behavior of a
             peeling algorithm applied to a sparse inhomogeneous random
             graph. Using a convergence result for these graphs, we
             derive the density evolution equations that characterize the
             asymptotic decoding performance. As an application, we
             discuss the design of irregular GPCs, employing a mixture of
             component codes with different erasure-correcting
             capabilities.},
   Doi = {10.1109/TIT.2017.2689783},
   Key = {fds326794}
}
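
Density evolution itself is easiest to see in its textbook form. The sketch below is not the generalized-product-code analysis of the paper; it is the standard (dv, dc)-regular LDPC recursion on the binary erasure channel, included only to illustrate the kind of fixed-point iteration such analyses produce.

# Density evolution for (dv, dc)-regular LDPC codes on the BEC.
def bec_density_evolution(eps, dv, dc, iters=2000):
    x = eps                                   # erasure probability of a variable-to-check message
    for _ in range(iters):
        x = eps * (1 - (1 - x) ** (dc - 1)) ** (dv - 1)
    return x

# crude bisection for the BP threshold of the (3,6)-regular ensemble
lo, hi = 0.0, 1.0
for _ in range(40):
    mid = (lo + hi) / 2
    if bec_density_evolution(mid, 3, 6) < 1e-6:
        lo = mid                              # decoding succeeds: threshold is above mid
    else:
        hi = mid                              # decoding fails: threshold is below mid
print("estimated (3,6) BP threshold:", lo)    # close to the known value of about 0.4294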

@article{fds322710,
   Author = {Hager, C and Amat, AGI and Pfister, HD and Brannstrom,
             F},
   Title = {Density evolution for deterministic generalized product
             codes with higher-order modulation},
   Journal = {International Symposium on Turbo Codes and Iterative
             Information Processing, Istc},
   Volume = {2016-October},
   Pages = {236-240},
   Publisher = {IEEE},
   Year = {2016},
   Month = {October},
   ISBN = {9781509034017},
   url = {http://dx.doi.org/10.1109/ISTC.2016.7593112},
   Abstract = {Generalized product codes (GPCs) are extensions of product
             codes (PCs) where coded bits are protected by two component
             codes but not necessarily arranged in a rectangular array.
             It has recently been shown that there exists a large class
             of deterministic GPCs (including, e.g., irregular PCs,
             half-product codes, staircase codes, and certain braided
             codes) for which the asymptotic performance under iterative
             bounded-distance decoding over the binary erasure channel
             (BEC) can be rigorously characterized in terms of a density
             evolution analysis. In this paper, the analysis is extended
             to the case where transmission takes place over parallel
             BECs with different erasure probabilities. We use this model
             to predict the code performance in a coded modulation setup
             with higher-order signal constellations. We also discuss the
             design of the bit mapper that determines the allocation of
             the coded bits to the modulation bits of the signal
             constellation.},
   Doi = {10.1109/ISTC.2016.7593112},
   Key = {fds322710}
}

@article{fds319384,
   Author = {Maduike, D and Pfister, HD and Sprintson, A},
   Title = {Design and implementation of physical-layer network-coding
             protocols},
   Journal = {Conference Record Asilomar Conference on Signals, Systems
             and Computers},
   Pages = {781-787},
   Publisher = {IEEE},
   Year = {2009},
   Month = {December},
   ISBN = {9781424458271},
   url = {http://dx.doi.org/10.1109/ACSSC.2009.5469964},
   Abstract = {Recently, there has been a growing interest in
             physical-layer network-coding techniques that facilitate
             information transfer in wireless relay networks.
             Physical-layer network-coding techniques take advantage of
             the additive nature of wireless signals by allowing two
             terminals to transmit simultaneously to the relay node. This
             technique has several performance benefits, such as
             improving utilization and throughput of wireless channels
             and reducing delay. In this paper, we present an algorithm
             for joint decoding of two unsynchronized transmitters to the
             modulo-2 sum of their transmitted messages. We address the
             problems that arise when the boundaries of the signals do
             not align with each other and when their phases are not
             identical. Our approach uses a state-based Viterbi decoding
             scheme that takes into account the timing offsets between
             the interfering signals. Our simulation studies show that
             this decoder outperforms previous approaches in the presence
             of noise and timing offset. © 2009 IEEE.},
   Doi = {10.1109/ACSSC.2009.5469964},
   Key = {fds319384}
}

@article{fds319412,
   Author = {Hou, J and Siegel, PH and Milstein, LB and Pfister,
             HD},
   Title = {Design of low-density parity-check codes for bandwidth
             efficient modulation},
   Journal = {Proceedings 2001 Ieee Information Theory Workshop, Itw
             2001},
   Pages = {24-26},
   Publisher = {IEEE},
   Year = {2001},
   Month = {January},
   ISBN = {0780371194},
   url = {http://dx.doi.org/10.1109/ITW.2001.955124},
   Abstract = {We design low-density parity-check (LDPC) codes for
             bandwidth efficient modulation using a multilevel coding
             (MLC) technique. We develop a method to analyze the
             asymptotic performance of the LDPC codes using
             message-passing decoding at each level of the MLC scheme as
             the codeword length goes to infinity. Simulation of very
             large block size LDPC codes verifies the accuracy of the
             analytical results. We jointly optimize the code rates and
             code parameters of the LDPC codes at each level of the MLC
             scheme, and the asymptotic performance of the optimized
             irregular LDPC codes is very close to the channel capacity
             of the additive white Gaussian noise (AWGN)
             channel.},
   Doi = {10.1109/ITW.2001.955124},
   Key = {fds319412}
}

@article{fds319393,
   Author = {Soriaga, JB and Pfister, HD and Siegel, PH},
   Title = {Determining and approaching achievable rates of binary
             intersymbol interference channels using multistage
             decoding},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {53},
   Number = {4},
   Pages = {1416-1429},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2007},
   Month = {April},
   url = {http://dx.doi.org/10.1109/TIT.2007.892778},
   Abstract = {By examining the achievable rates of a multistage decoding
             system on stationary ergodic channels, we derive lower
             bounds on the mutual information rate corresponding to
             independent and uniformly distributed (i.u.d.) inputs, also
             referred to as the i.u.d. information rate. For binary
             intersymbol interference (ISI) channels, we show that these
             bounds become tight as the number of decoding stages
             increases. Our analysis, which focuses on the marginal
             conditional output densities at each stage of decoding,
             provides an information rate corresponding to each stage.
             These rates underlie the design of multilevel coding
             schemes, based upon low-density parity-check (LDPC) codes
             and message passing, that in combination with multistage
             decoding approach the i.u.d. information rate for binary ISI
             channels. We give example constructions for channel models
             that have been commonly used in magnetic recording. These
             examples demonstrate that the technique is very effective
             even for a small number of decoding stages. © 2007
             IEEE.},
   Doi = {10.1109/TIT.2007.892778},
   Key = {fds319393}
}

@article{fds319310,
   Author = {Hager, C and Pfister, HD and Graell i Amat, A and Brannstrom,
             F},
   Title = {Deterministic and ensemble-based spatially-coupled product
             codes},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2016-August},
   Pages = {2114-2118},
   Publisher = {IEEE},
   Year = {2016},
   Month = {August},
   ISBN = {9781509018062},
   url = {http://dx.doi.org/10.1109/ISIT.2016.7541672},
   Abstract = {Several authors have proposed spatially-coupled (or
             convolutional-like) variants of product codes (PCs). In this
             paper, we focus on a parametrized family of generalized PCs
             that recovers some of these codes (e.g., staircase and
             block-wise braided codes) as special cases and study the
             iterative decoding performance over the binary erasure
             channel. Even though our code construction is deterministic
             (and not based on a randomized ensemble), we show that it is
             still possible to rigorously derive the density evolution
             (DE) equations that govern the asymptotic performance. The
             obtained DE equations are then compared to those for a
             related spatially-coupled PC ensemble. In particular, we
             show that there exists a family of (deterministic) braided
             codes that follows the same DE equation as the ensemble, for
             any spatial length and coupling width.},
   Doi = {10.1109/ISIT.2016.7541672},
   Key = {fds319310}
}

@article{fds322718,
   Author = {Xia, Z and Huff, GH and Chamberland, JF and Pfister, H and Bhattacharya,
             R},
   Title = {Direction of arrival estimation using canonical and
             crystallographic volumetric element configurations},
   Journal = {Proceedings of 6th European Conference on Antennas and
             Propagation, Eucap 2012},
   Pages = {1436-1439},
   Publisher = {IEEE},
   Year = {2012},
   Month = {July},
   ISBN = {9781457709180},
   url = {http://dx.doi.org/10.1109/EuCAP.2012.6206123},
   Abstract = {Volumetric distributions of receiving antennas are
             investigated for their ability to mitigate aliasing and
             enhance the direction of arrival (DOA) estimation. Surface
             and volumetric distributions in canonical arrangements
             (cubic and spherical) of elements and configurations
             inspired by crystallographic lattices (cubic, tetragonal,
             etc.) are investigated using the Multiple Signal
             Classification (MUSIC) algorithm. These are compared to
             planar and linear distributions, and experimental
             observations from basic transmission measurements are used
             to evaluate several of the element distributions and compare
             their performance. © 2012 IEEE.},
   Doi = {10.1109/EuCAP.2012.6206123},
   Key = {fds322718}
}
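
For reference, the MUSIC estimator used above is sketched below for the simplest case of a uniform linear array with half-wavelength spacing and two sources (an assumed scenario; the volumetric and crystallographic element configurations studied in the paper are not reproduced here).

# MUSIC direction-of-arrival estimation for a uniform linear array.
import numpy as np

rng = np.random.default_rng(3)
m, snapshots = 8, 200                           # sensors, time samples (assumed)
true_angles = np.deg2rad([-20.0, 35.0])         # source directions (assumed)

def steering(theta):
    return np.exp(1j * np.pi * np.arange(m)[:, None] * np.sin(theta))

A = steering(true_angles)                       # m x 2 steering matrix
S = rng.standard_normal((2, snapshots)) + 1j * rng.standard_normal((2, snapshots))
X = A @ S + 0.1 * (rng.standard_normal((m, snapshots)) + 1j * rng.standard_normal((m, snapshots)))

R = X @ X.conj().T / snapshots                  # sample covariance
eigval, eigvec = np.linalg.eigh(R)
En = eigvec[:, :m - 2]                          # noise subspace (number of sources assumed known)

grid = np.deg2rad(np.linspace(-90, 90, 1801))
a = steering(grid)
spectrum = 1.0 / np.linalg.norm(En.conj().T @ a, axis=0) ** 2   # MUSIC pseudospectrum

is_peak = (spectrum[1:-1] > spectrum[:-2]) & (spectrum[1:-1] > spectrum[2:])
peak_idx = np.where(is_peak)[0] + 1
top2 = peak_idx[np.argsort(spectrum[peak_idx])[-2:]]
print("estimated DOAs (deg):", np.sort(np.rad2deg(grid[top2])))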

@article{fds352265,
   Author = {Thangaraj, A and Pfister, HD},
   Title = {Efficient Maximum-Likelihood Decoding of Reed-Muller
             RM(m-3,m) Codes},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2020-June},
   Pages = {263-268},
   Year = {2020},
   Month = {June},
   ISBN = {9781728164328},
   url = {http://dx.doi.org/10.1109/ISIT44484.2020.9174065},
   Abstract = {Reed-Muller (RM) codes, a classical family of codes known
             for their elegant algebraic structure, have recently been
             shown to achieve capacity under maximum-likelihood (ML)
             decoding on the binary erasure channel and this has
             rekindled interest in their efficient decoding. We consider
             the code family RM(m-3,m) and develop a new ML decoder, for
             transmission over the binary symmetric channel, that
             exploits their large symmetry group. The new decoder has
             lower complexity than an earlier method introduced by
             Seroussi and Lempel in 1983.},
   Doi = {10.1109/ISIT44484.2020.9174065},
   Key = {fds352265}
}

@article{fds339599,
   Author = {Yoo, I and Imani, MF and Sleasman, T and Pfister, HD and Smith,
             DR},
   Title = {Enhancing Capacity of Spatial Multiplexing Systems Using
             Reconfigurable Cavity-Backed Metasurface Antennas in
             Clustered MIMO Channels},
   Journal = {Ieee Transactions on Communications},
   Volume = {67},
   Number = {2},
   Pages = {1070-1084},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2019},
   Month = {February},
   url = {http://dx.doi.org/10.1109/TCOMM.2018.2876899},
   Abstract = {We propose a spatial multiplexing system using
             reconfigurable cavity-backed metasurface antennas. The
             metasurface antennas consist of a printed cavity with
             dynamically tunable metamaterial radiators patterned on one
             side and fed by multiple radio frequency ports on the other
             side (each port representing one communication node),
             forming a shared aperture. By individual tuning of the
             radiators, the antennas can generate steerable, concurrent
             beams that can be adapted to the properties of
             multiple-input-multiple-output (MIMO) channels. In this
             paper, we present a 2 × 2 MIMO system with simulated
             metasurface antennas as transmit and receive antennas
             operating at 5.9 GHz. We demonstrate that the flexibility in
             beamforming supported by the metasurface antennas can be
             used to achieve low spatial correlation and high SNR gain in
             clustered MIMO channels, leading to a significant
             improvement of the channel capacity. Numerical studies show
             2.36-fold and 2.11-fold enhancements of capacity in MIMO
             channels with one and two clusters, respectively, compared
             with a MIMO system consisting of linear dipoles. The MIMO
             system based on the metasurface antennas can be low cost,
             low profile, and low power. The metasurface antenna thus has
             potential applications in small cell networks requiring high
             data rate under bandwidth, energy, and cost
             constraints.},
   Doi = {10.1109/TCOMM.2018.2876899},
   Key = {fds339599}
}

@article{fds319399,
   Author = {Pfister, HD},
   Title = {Finite-length analysis of a capacity-achieving ensemble for
             the binary erasure channel},
   Journal = {Proceedings of the Ieee Itsoc Information Theory Workshop
             2005 on Coding and Complexity, Itw2005},
   Pages = {166-170},
   Publisher = {IEEE},
   Year = {2005},
   Month = {December},
   ISBN = {078039481X},
   url = {http://dx.doi.org/10.1109/ITW.2005.1531880},
   Abstract = {In this paper, we consider the finite-length performance of
             a capacity-achieving sequence of irregular repeat-accumulate
             (IRA) code ensembles. We focus on a sequence of bit-regular
             ensembles with degree 3 which was shown to achieve capacity
             with bounded complexity [9]. To characterize how fast the
             block length of the code must grow with respect to the
             truncation point of the degree distribution (i.e., maximum
             check degree), we compute an upper bound on the average
             weight enumerator. Based on this analysis, we present a
             particular truncation sequence that could achieve a minimum
             distance which grows like n^{1/3} even as the gap to capacity
             goes to zero. We also consider the performance of these
             codes in the waterfall region by extending the finite-length
             scaling law [1] from low-density parity-check codes to IRA
             codes. This shows that the performance near the iterative
             decoding threshold is well characterized by a suitably
             scaled Q-function for large enough block length. Numerical
             results are given for the scaling parameters of this
             ensemble sequence and for a few other IRA codes.
             Unfortunately, the simulation results for the
             capacity-achieving sequence start to match the scaling law
             only for very large block lengths. © 2005
             IEEE.},
   Doi = {10.1109/ITW.2005.1531880},
   Key = {fds319399}
}

@article{fds322719,
   Author = {Chamberland, JF and Pfister, H and Shakkottai,
             S},
   Title = {First-passage time analysis for digital communication over
             erasure channels with delay-sensitive traffic},
   Journal = {2010 48th Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2010},
   Pages = {399-405},
   Publisher = {IEEE},
   Year = {2010},
   Month = {December},
   ISBN = {9781424482146},
   url = {http://dx.doi.org/10.1109/ALLERTON.2010.5706934},
   Abstract = {This article explores the relation between queueing behavior
             and code-rate selection for digital communication over
             correlated erasure channels. The focus is on non-asymptotic
             system analysis, with finite block-lengths and non-vanishing
             probabilities of decoding failure. The transmit buffer is
             assumed to possess a given initial distribution and
             performance is evaluated in terms of the time required for
             the queue to become empty. Special attention is given to
             channel memory and its impact on the decoding process at the
             receiver. The system is ultimately defined in terms of a
             finite-state erasure channel. Using a Markov structure, the
             evolution of the transmit buffer is characterized and the
             distribution of the first-passage time to an empty queue is
             obtained. The proposed methodology is employed to optimally
             select code-rate. This provides new insights on the natural
             tradeoff between error protection and data content in the
             finite block-length regime. ©2010 IEEE.},
   Doi = {10.1109/ALLERTON.2010.5706934},
   Key = {fds322719}
}

@article{fds319335,
   Author = {Kumar, S and Chamberland, JF and Pfister, HD},
   Title = {First-passage time and large-deviation analysis for erasure
             channels with memory},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {59},
   Number = {9},
   Pages = {5547-5565},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2013},
   Month = {August},
   url = {http://dx.doi.org/10.1109/TIT.2013.2260593},
   Abstract = {This paper considers the performance of digital
             communication systems transmitting messages over
             finite-state erasure channels with memory. Information bits
             are protected from channel erasures using error-correcting
             codes; successful receptions of codewords are acknowledged
             at the source through instantaneous feedback. The primary
             focus of this research is on delay-sensitive applications,
             codes with finite block lengths, and, necessarily,
             nonvanishing probabilities of decoding failure. The
             contribution of this paper is twofold. A methodology to
             compute the distribution of the time required to empty a
             buffer is introduced. Based on this distribution, the mean
             hitting time to an empty queue and delay-violation
             probabilities for specific thresholds can be computed
             explicitly. The proposed techniques apply to situations
             where the transmit buffer contains a predetermined number of
             information bits at the onset of the data transfer.
             Furthermore, as additional performance criteria, large
             deviation principles are obtained for the empirical mean
             service time and the average packet-transmission time
             associated with the communication process. This rigorous
             framework yields a pragmatic methodology to select code rate
             and block length for the communication unit as functions of
             the service requirements. Examples motivated by practical
             systems are provided to further illustrate the applicability
             of these techniques. © 1963-2012 IEEE.},
   Doi = {10.1109/TIT.2013.2260593},
   Key = {fds319335}
}

@article{fds319387,
   Author = {Zhang, F and Mao, X and Zhou, W and Pfister, HD},
   Title = {Girth-10 LDPC codes based on 3-D cyclic lattices},
   Journal = {Ieee Transactions on Vehicular Technology},
   Volume = {57},
   Number = {2},
   Pages = {1049-1060},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2008},
   Month = {March},
   url = {http://dx.doi.org/10.1109/TVT.2007.905622},
   Abstract = {In this paper, we propose a new method based on
             combinatorial designs for constructing high-girth
             low-density parity-check (LDPC) codes. We use a 3-D lattice
             to generate balanced incomplete block designs based on
             planes and lines in the lattice. This gives families of
             regular LDPC codes with girths of at least 6, 8, and 10,
             whose parity-check matrices are all block circulant. The
             main advantage of this construction is that the algebraic
             structure leads to efficient encoders and decoders. Based on
             the block-circulant structure of a parity-check matrix, we
             present an efficient encoder that can be parallelized to
             improve the speed of encoding. The simulation results show
             that these families of LDPC codes perform very well on
             additive-white-Gaussian-noise channels (roughly 0.45 dB from
             the channel capacity) and Rayleigh fading channels (roughly
             0.51 dB from the channel capacity). © 2008
             IEEE.},
   Doi = {10.1109/TVT.2007.905622},
   Key = {fds319387}
}

@article{fds319367,
   Author = {Kim, BH and Yedla, A and Pfister, HD},
   Title = {IMP: A message-passing algorithm for matrix
             completion},
   Journal = {6th International Symposium on Turbo Codes and Iterative
             Information Processing, Istc 2010},
   Pages = {462-466},
   Publisher = {IEEE},
   Year = {2010},
   Month = {November},
   ISBN = {9781424467457},
   url = {http://dx.doi.org/10.1109/ISTC.2010.5613803},
   Abstract = {A new message-passing (MP) method is considered for the
             matrix completion problem associated with recommender
             systems. We attack the problem using a (generative) factor
             graph model that is related to a probabilistic low-rank
             matrix factorization. Based on the model, we propose a new
             algorithm, termed IMP, for the recovery of a data matrix
             from incomplete observations. The algorithm is based on a
             clustering followed by inference via MP (IMP). The algorithm
             is compared with a number of other matrix completion
             algorithms on real collaborative filtering (e.g., Netflix)
             data matrices. Our results show that, while many methods
             perform similarly with a large number of revealed entries,
             the IMP algorithm outperforms all others when the fraction
             of observed entries is small. This is helpful because it
             reduces the well-known cold-start problem associated with
             collaborative filtering (CF) systems in practice. © 2010
             IEEE.},
   Doi = {10.1109/ISTC.2010.5613803},
   Key = {fds319367}
}
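
To fix the recovery problem that IMP addresses, the sketch below completes a synthetic low-rank rating matrix from 30% of its entries using plain alternating least squares. This is an assumed baseline for illustration only, not the clustering-plus-message-passing IMP algorithm of the paper.

# Matrix completion of a synthetic low-rank matrix by alternating least squares.
import numpy as np

rng = np.random.default_rng(2)
n_users, n_items, rank = 60, 50, 3                       # assumed problem size
M = rng.standard_normal((n_users, rank)) @ rng.standard_normal((rank, n_items))
mask = rng.random(M.shape) < 0.3                         # 30% of entries observed

U = rng.standard_normal((n_users, rank))
V = rng.standard_normal((n_items, rank))
lam = 0.1                                                # ridge regularization
for _ in range(50):                                      # alternating ridge regressions
    for i in range(n_users):
        obs = mask[i]
        A = V[obs].T @ V[obs] + lam * np.eye(rank)
        U[i] = np.linalg.solve(A, V[obs].T @ M[i, obs])
    for j in range(n_items):
        obs = mask[:, j]
        A = U[obs].T @ U[obs] + lam * np.eye(rank)
        V[j] = np.linalg.solve(A, U[obs].T @ M[obs, j])

rmse = np.sqrt(np.mean((U @ V.T - M)[~mask] ** 2))
print("RMSE on unobserved entries:", rmse)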

@article{fds319397,
   Author = {Hou, J and Smee, JE and Pfister, HD and Tomasin, S},
   Title = {Implementing interference cancellation to increase the EV-DO
             Rev A reverse link capacity},
   Journal = {Ieee Communications Magazine},
   Volume = {44},
   Number = {2},
   Pages = {96-102},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2006},
   Month = {February},
   url = {http://dx.doi.org/10.1109/MCOM.2006.1593551},
   Abstract = {This article provides the principles and practice of how
             interference cancellation can be implemented on the EV-DO
             Rev A reverse link. It is shown that applying interference
             cancellation to CDMA achieves the multiple access channel
             sum rate capacity for either frame synchronous or
             asynchronous users. The per user SINR gain from space-time
             interference cancellation translates directly into a CDMA
             capacity gain of the same factor, allowing EV-DO Rev A to
             support more users with higher data rates. We demonstrate
             how interference cancellation can be added to base station
             processing without modifying user terminals, EV-DO
             standards, or network coverage. We present commercially
             viable receiver architectures for implementing interference
             cancellation with the asynchronism and H-ARQ of EV-DO Rev A,
             and explain why closed loop power control can operate the
             same way it does today. Network level simulations over a
             wide range of channels confirm that interference
             cancellation offers significant capacity gains for all
             users, while maintaining the same link budget and system
             stability. © 2006 IEEE.},
   Doi = {10.1109/MCOM.2006.1593551},
   Key = {fds319397}
}

@article{fds322711,
   Author = {Sanatkar, MR and Pfister, HD},
   Title = {Increasing the rate of spatially-coupled codes via optimized
             irregular termination},
   Journal = {International Symposium on Turbo Codes and Iterative
             Information Processing, Istc},
   Volume = {2016-October},
   Pages = {31-35},
   Publisher = {IEEE},
   Year = {2016},
   Month = {October},
   ISBN = {9781509034017},
   url = {http://dx.doi.org/10.1109/ISTC.2016.7593071},
   Abstract = {In this paper, we consider the rate-loss problem for
             spatially-coupled LDPC (SC-LDPC) codes on the binary erasure
             channel. Although SC-LDPC codes have good noise thresholds
             under belief-propagation (BP) decoding, they also suffer a
             rate-loss due to termination that is significant at moderate
             blocklengths. Our idea is to attach additional variable
             nodes at the boundary using an irregular degree
             distribution. Then, this degree distribution is optimized to
             improve the code rate without reducing the BP threshold. The
             optimization is formulated as a linear program and solved
             numerically. Our results show that the code rate can be
             increased by a reasonable amount without decreasing the BP
             threshold.},
   Doi = {10.1109/ISTC.2016.7593071},
   Key = {fds322711}
}
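
The BP threshold that constrains the optimization in the abstract above is the largest channel erasure probability for which density evolution drives the erasure fraction to zero. A minimal sketch (my own, for the regular (dv, dc) ensemble rather than the irregular terminated ensembles studied in the paper) locates it by bisection:

    def bp_threshold(dv=3, dc=6, tol=1e-6, iters=5000):
        """BP threshold of the regular (dv, dc) LDPC ensemble on the BEC."""
        def converges(eps):
            x = eps
            for _ in range(iters):
                x = eps * (1.0 - (1.0 - x) ** (dc - 1)) ** (dv - 1)
            return x < 1e-10            # erasure fraction driven (essentially) to zero
        lo, hi = 0.0, 1.0
        while hi - lo > tol:
            mid = 0.5 * (lo + hi)
            if converges(mid):
                lo = mid
            else:
                hi = mid
        return 0.5 * (lo + hi)

    print(bp_threshold(3, 6))           # roughly 0.429 for the (3, 6) ensemble

The paper's construction attaches extra variable nodes at the chain boundary and maximizes the code rate subject to this threshold not decreasing.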

@article{fds319343,
   Author = {Narayanan, KR and Pfister, HD},
   Title = {Iterative collision resolution for slotted ALOHA: An optimal
             uncoordinated transmission policy},
   Journal = {International Symposium on Turbo Codes and Iterative
             Information Processing, Istc},
   Pages = {136-139},
   Publisher = {IEEE},
   Year = {2012},
   Month = {December},
   ISBN = {9781457721151},
   url = {http://dx.doi.org/10.1109/ISTC.2012.6325214},
   Abstract = {We consider a multi-user wireless network in which each user
             has one packet of information to transmit to a central
             receiver. We study an uncoordinated paradigm where the users
             send their packet a random number of times according to a
             probability distribution. Instead of discarding the collided
             packets, the receiver performs iterative collision
             resolution. Recently, a few studies have shown that the
             iterative collision resolution process can be viewed as
             message-passing decoding on an appropriately defined Tanner
             graph. Using this equivalence, they used standard techniques
             to numerically optimize the probability distribution and
             demonstrated substantial throughput improvement over slotted
             ALOHA. In this paper, we show that the well-known soliton
             distribution is an optimal probability distribution and that
             the resulting throughput efficiency can be arbitrarily close
             to 1. © 2012 IEEE.},
   Doi = {10.1109/ISTC.2012.6325214},
   Key = {fds319343}
}
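
The (ideal) soliton distribution identified as optimal in the abstract above puts probability 1/K on degree 1 and 1/(d(d-1)) on degree d = 2, ..., K. A small sketch (K and the sampling step are my own illustration) builds the distribution and draws the number of times each user repeats its packet:

    import numpy as np

    def ideal_soliton(K):
        """Ideal soliton distribution over repetition degrees 1..K."""
        p = np.zeros(K)
        p[0] = 1.0 / K                          # P(degree = 1)
        for d in range(2, K + 1):
            p[d - 1] = 1.0 / (d * (d - 1))      # P(degree = d)
        return p                                # sums to 1 by telescoping

    rng = np.random.default_rng(0)
    K = 100
    degrees = rng.choice(np.arange(1, K + 1), size=10, p=ideal_soliton(K))
    print(degrees)   # repetition count chosen independently by each of 10 users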

@article{fds319337,
   Author = {Jian, YY and Pfister, HD and Narayanan, KR and Rao, R and Mazahreh,
             R},
   Title = {Iterative hard-decision decoding of braided BCH codes for
             high-speed optical communication},
   Journal = {Globecom Ieee Global Telecommunications Conference},
   Pages = {2376-2381},
   Publisher = {IEEE},
   Year = {2013},
   Month = {January},
   ISBN = {9781479913534},
   url = {http://dx.doi.org/10.1109/GLOCOM.2013.6831429},
   Abstract = {Designing error-correcting codes for optical communication
             is challenging mainly because of the high data rates (e.g.,
             100 Gbps) required and the expectation of low latency, low
             overhead (e.g., 7% redundancy), and large coding gain (e.g.,
             >9 dB). Although soft-decision decoding (SDD) of low-density
             parity-check (LDPC) codes is an active area of research, the
             mainstay of optical transport systems is still the iterative
             hard-decision decoding (HDD) of generalized product codes
             with algebraic syndrome decoding of the component codes.
             This is because iterative HDD allows many simplifications
             and SDD of LDPC codes results in much higher implementation
             complexity. In this paper, we use analysis and simulation to
             evaluate tightly-braided block codes with BCH component
             codes for high-speed optical communication. Simulation of
             the iterative HDD shows that these codes are competitive
             with the best schemes based on HDD. Finally, we suggest a
             specific design that is compatible with the G.709 framing
             structure and exhibits a coding gain of >9.35 dB at 7%
             redundancy under iterative HDD with a latency of
             approximately 1 million bits. © 2013 IEEE.},
   Doi = {10.1109/GLOCOM.2013.6831429},
   Key = {fds319337}
}

@article{fds319357,
   Author = {Kim, BH and Pfister, HD},
   Title = {Joint decoding of LDPC codes and finite-state channels via
             linear-programming},
   Journal = {Ieee Journal of Selected Topics in Signal
             Processing},
   Volume = {5},
   Number = {8},
   Pages = {1563-1576},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2011},
   Month = {December},
   url = {http://dx.doi.org/10.1109/JSTSP.2011.2165525},
   Abstract = {This paper considers the joint-decoding problem for
             finite-state channels (FSCs) and low-density parity-check
             (LDPC) codes. In the first part, the linear-programming (LP)
             decoder for binary linear codes is extended to perform
             joint-decoding of binary-input FSCs. In particular, we
             provide a rigorous definition of LP joint-decoding
             pseudo-codewords (JD-PCWs) that enables evaluation of the
             pairwise error probability between codewords and JD-PCWs in
             AWGN. This leads naturally to a provable upper bound on
             decoder failure probability. If the channel is a
             finite-state intersymbol interference channel, then the
             joint LP decoder also has the maximum-likelihood (ML)
             certificate property and all integer-valued solutions are
             codewords. In this case, the performance loss relative to ML
             decoding can be explained completely by fractional-valued
             JD-PCWs. After deriving these results, we discovered some
             elements were equivalent to earlier work by Flanagan on
             linear-programming receivers. In the second part, we develop
             an efficient iterative solver for the joint LP decoder
             discussed in the first part. In particular, we extend the
             approach of iterative approximate LP decoding, proposed by
             Vontobel and Koetter and analyzed by Burshtein, to this
             problem. By taking advantage of the dual-domain structure of
             the joint-decoding LP, we obtain a convergent iterative
             algorithm for joint LP decoding whose structure is similar
             to BCJR-based turbo equalization (TE). The result is a joint
             iterative decoder whose per-iteration complexity is similar
             to that of TE but whose performance is similar to that of
             joint LP decoding. The main advantage of this decoder is
             that it appears to provide the predictability and superior
             performance of joint LP decoding with the computational
             complexity of TE. One expected application is coding for
             magnetic storage where the required block-error rate is
             extremely low and system performance is difficult to verify
             by simulation. © 2011 IEEE.},
   Doi = {10.1109/JSTSP.2011.2165525},
   Key = {fds319357}
}

@article{fds319389,
   Author = {Pfister, HD and Siegel, PH},
   Title = {Joint iterative decoding of LDPC codes for channels with
             memory and erasure noise},
   Journal = {Ieee Journal on Selected Areas in Communications},
   Volume = {26},
   Number = {2},
   Pages = {320-337},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2008},
   Month = {February},
   url = {http://dx.doi.org/10.1109/JSAC.2008.080209},
   Abstract = {This paper investigates the joint iterative decoding of
             low-density parity-check (LDPC) codes and channels with
             memory. Sequences of irregular LDPC codes are presented that
             achieve, under joint iterative decoding, the symmetric
             information rate of a class of channels with memory and
             erasure noise. This gives proof, for the first time, that
             joint iterative decoding can be information rate lossless
             with respect to maximum-likelihood decoding. These results
             build on previous capacity-achieving code constructions for
             the binary erasure channel. A two-state intersymbol-interference
             channel with erasure noise, known as the dicode erasure
             channel, is used as a concrete example throughout the paper.
             © 2008 IEEE.},
   Doi = {10.1109/JSAC.2008.080209},
   Key = {fds319389}
}

@article{fds319370,
   Author = {Wilson, MP and Narayanan, K and Pfister, HD and Sprintson,
             A},
   Title = {Joint physical layer coding and network coding for
             bidirectional relaying},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {56},
   Number = {11},
   Pages = {5641-5654},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2010},
   Month = {November},
   url = {http://dx.doi.org/10.1109/TIT.2010.2068750},
   Abstract = {We consider a communication system where two transmitters
             wish to exchange information through a central relay. The
             transmitter and relay nodes exchange data over synchronized,
             average power constrained additive white Gaussian noise
             channels with a real input with signal-to-noise ratio (SNR)
             of snr. An upper bound on the capacity is 1/2 log(1 + snr)
             bits per transmitter per use of the multiple access phase
             and broadcast phase of the bidirectional relay channel. We
             show that, using lattice codes and lattice decoding, we can
             obtain a rate of 1/2 log(1/2 + snr) bits per transmitter,
             which is essentially optimal at high SNR. The main idea is
             to decode the sum of the codewords modulo a lattice at the
             relay followed by a broadcast phase which performs
             Slepian-Wolf coding. We also show that if the two
             transmitters use identical lattices with minimum angle
             decoding, we can achieve the same rate of 1/2 log(1/2 +
             snr). The proposed scheme can be thought of as a joint
             physical-layer network-layer code which outperforms other
             recently proposed analog network coding schemes. © 2006
             IEEE.},
   Doi = {10.1109/TIT.2010.2068750},
   Key = {fds319370}
}
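
A quick way to see why the abstract calls the lattice scheme essentially optimal at high SNR (a short check of my own, not taken from the paper): the gap between the upper bound and the achieved rate, per transmitter per channel use, is

    \[
      \tfrac{1}{2}\log\bigl(1+\mathrm{snr}\bigr)
      - \tfrac{1}{2}\log\bigl(\tfrac{1}{2}+\mathrm{snr}\bigr)
      = \tfrac{1}{2}\log\frac{1+\mathrm{snr}}{\tfrac{1}{2}+\mathrm{snr}}
      \le \tfrac{1}{2}\log 2 ,
    \]

so the loss is never more than half a bit and tends to zero as snr grows.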

@article{fds352409,
   Author = {Can, T and Rengaswamy, N and Calderbank, R and Pfister,
             HD},
   Title = {Kerdock Codes Determine Unitary 2-Designs},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {66},
   Number = {10},
   Pages = {6104-6120},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2020},
   Month = {October},
   url = {http://dx.doi.org/10.1109/TIT.2020.3015683},
   Abstract = {The non-linear binary Kerdock codes are known to be Gray
             images of certain extended cyclic codes of length N = 2^m
             over Z4. We show that exponentiating these Z4-valued
             codewords by i ≜ √(-1) produces stabilizer states, which are
             quantum states obtained using only Clifford unitaries. These
             states are also the common eigenvectors of commuting
             Hermitian matrices forming maximal commutative subgroups
             (MCS) of the Pauli group. We use this quantum description to
             simplify the derivation of the classical weight distribution
             of Kerdock codes. Next, we organize the stabilizer states to
             form N+1 mutually unbiased bases and prove that
             automorphisms of the Kerdock code permute their
             corresponding MCS, thereby forming a subgroup of the
             Clifford group. When represented as symplectic matrices,
             this subgroup is isomorphic to the projective special linear
             group PSL(2,N). We show that this automorphism group acts
             transitively on the Pauli matrices, which implies that the
             ensemble is Pauli mixing and hence forms a unitary 2-design.
             The Kerdock design described here was originally discovered
             by Cleve et al. (2016), but the connection to classical
             codes is new which simplifies its description and
             translation to circuits significantly. Sampling from the
             design is straightforward, the translation to circuits uses
             only Clifford gates, and the process does not require
             ancillary qubits. Finally, we also develop algorithms for
             optimizing the synthesis of unitary 2-designs on encoded
             qubits, i.e., to construct logical unitary 2-designs.
             Software implementations are available at
             https://github.com/nrenga/symplectic-arxiv18a, which we use
             to provide empirical gate complexities for up to 16
             qubits.},
   Doi = {10.1109/TIT.2020.3015683},
   Key = {fds352409}
}

@article{fds346725,
   Author = {Can, T and Rengaswamy, N and Calderbank, R and Pfister,
             HD},
   Title = {Kerdock Codes Determine Unitary 2-Designs},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2019-July},
   Pages = {2908-2912},
   Year = {2019},
   Month = {July},
   url = {http://dx.doi.org/10.1109/ISIT.2019.8849504},
   Abstract = {The binary non-linear Kerdock codes are Gray images of
             $\mathbb{Z}_4$-linear Kerdock codes of length N = 2^m. We show
             that exponentiating $\imath = \sqrt{-1}$ by these
             $\mathbb{Z}_4$-valued codewords produces stabilizer states,
             which are the common eigenvectors of maximal commutative
             subgroups (MCS) of the Pauli group. We use this quantum
             description to simplify the proof of the classical weight
             distribution of Kerdock codes. Next, we partition stabilizer
             states into N +1 mutually unbiased bases and prove that
             automorphisms of the Kerdock code permute the associated
             MCS. This automorphism group, represented as symplectic
             matrices, is isomorphic to the projective special linear
             group PSL(2,N) and forms a unitary 2-design. The design
             described here was originally discovered by Cleve et al.
             (2016), but the connection to classical codes is new. This
             significantly simplifies the description of the design and
             its translation to circuits.},
   Doi = {10.1109/ISIT.2019.8849504},
   Key = {fds346725}
}

@article{fds319356,
   Author = {Kumar, S and Chamberland, JF and Pfister, HD},
   Title = {Large deviations on empirical service for erasure channels
             with memory},
   Journal = {2011 49th Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2011},
   Pages = {1641-1648},
   Publisher = {IEEE},
   Year = {2011},
   Month = {December},
   ISBN = {9781457718168},
   url = {http://dx.doi.org/10.1109/Allerton.2011.6120365},
   Abstract = {This article examines the performance of a digital
             communication link from a large deviations perspective. The
             underlying physical environment is modeled as an erasure
             channel with memory. Information is shielded from symbol
             erasures using error-correcting codes with finite
             block-lengths. At the onset of the communication process,
             the transmit buffer is assumed to contain a certain number
             of data bits; these bits are partitioned into segments and
             subsequently transmitted as coded packets. Acknowledgments
             of successful transmissions are obtained through periodic,
             reliable feedback. Performance criteria are derived based on
             the average service rate and the first-passage time to an
             empty queue. The optimization problem is posed in an
             asymptotic setting and large deviation principles are
             obtained for these two quantities. The proposed analysis
             framework provides a methodology tailored to code rate and
             block-length selection for delay-sensitive applications.
             This study leads to pertinent guidelines on how to choose
             system parameters for communication over correlated
             channels. Examples obtained through a numerical study are
             included to further illustrate the value of the techniques
             introduced in this paper. © 2011 IEEE.},
   Doi = {10.1109/Allerton.2011.6120365},
   Key = {fds319356}
}

@article{fds319368,
   Author = {Yedla, A and Pfister, HD and Narayanan, KR},
   Title = {LDPC code design for transmission of correlated sources
             across noisy channels without CSIT},
   Journal = {6th International Symposium on Turbo Codes and Iterative
             Information Processing, Istc 2010},
   Pages = {467-471},
   Publisher = {IEEE},
   Year = {2010},
   Month = {November},
   ISBN = {9781424467457},
   url = {http://dx.doi.org/10.1109/ISTC.2010.5613829},
   Abstract = {We consider the problem of transmitting correlated data
             after independent encoding to a central receiver through
             orthogonal channels. We assume that the channel state
             information is not known at the transmitter. The receiver
             has access to both the source correlation and the channel
             state information. We provide a generic framework for
             analyzing the performance of joint iterative decoding, using
             density evolution. Using differential evolution, we design
             punctured systematic LDPC codes to maximize the region of
             achievable channel conditions, with joint iterative
             decoding. The main contribution of this paper is to
             demonstrate that properly designed LDPC codes can perform well
             simultaneously over a wide range of channel
             parameters.},
   Doi = {10.1109/ISTC.2010.5613829},
   Key = {fds319368}
}

@article{fds319374,
   Author = {Zhang, F and Pfister, HD and Jiang, A},
   Title = {LDPC codes for rank modulation in flash memories},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Pages = {859-863},
   Year = {2010},
   Month = {August},
   ISBN = {9781424469604},
   url = {http://dx.doi.org/10.1109/ISIT.2010.5513603},
   Abstract = {An LDPC code is proposed for flash memories based on rank
             modulation. In contrast to previous approaches, this enables
             the use of long ECCs with fixed-length modulation codes. For
             ECC design, the rank modulation scheme is treated as part of
             an equivalent channel. A probabilistic model of the
             equivalent channel is derived and a simple high-SNR
             approximation is given. LDPC codes over integer rings and
             finite fields are designed for the approximate channel and a
             low-complexity symbol-flipping verification-based (SFVB)
             message-passing decoding algorithm is proposed to take
             advantage of the channel structure. Density evolution (DE)
             is used to calculate decoding thresholds and simulations are
             used to compare the low-complexity decoder with sum-product
             decoding. © 2010 IEEE.},
   Doi = {10.1109/ISIT.2010.5513603},
   Key = {fds319374}
}

@article{fds346724,
   Author = {Lian, M and Carpi, F and Hager, C and Pfister, HD},
   Title = {Learned Belief-Propagation Decoding with Simple Scaling and
             SNR Adaptation},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2019-July},
   Pages = {161-165},
   Year = {2019},
   Month = {July},
   url = {http://dx.doi.org/10.1109/ISIT.2019.8849419},
   Abstract = {We consider the weighted belief-propagation (WBP) decoder
             recently proposed by Nachmani et al. where different weights
             are introduced for each Tanner graph edge and optimized
             using machine learning techniques. Our focus is on
             simple-scaling models that use the same weights across
             certain edges to reduce the storage and computational
             burden. The main contribution is to show that simple scaling
             with few parameters often achieves the same gain as the full
             parameterization. Moreover, several training improvements
             for WBP are proposed. For example, it is shown that
             minimizing average binary cross-entropy is suboptimal in
             general in terms of bit error rate (BER) and a new
             "soft-BER" loss is proposed which can lead to better
             performance. We also investigate parameter adapter networks
             (PANs) that learn the relation between the signal-to-noise
             ratio and the WBP parameters. As an example, for the (32,
             16) Reed-Muller code with a highly redundant parity-check
             matrix, training a PAN with soft-BER loss gives
             near-maximum-likelihood performance assuming simple scaling
             with only three parameters.},
   Doi = {10.1109/ISIT.2019.8849419},
   Key = {fds346724}
}

@article{fds359252,
   Author = {Buchberger, A and Häger, C and Pfister, HD and Schmalen, L and I Amat,
             AG},
   Title = {Learned decimation for neural belief propagation decoders
             (invited paper)},
   Journal = {2015 Ieee International Conference on Acoustics, Speech, and
             Signal Processing (Icassp)},
   Volume = {2021-June},
   Pages = {8273-8277},
   Year = {2021},
   Month = {January},
   url = {http://dx.doi.org/10.1109/ICASSP39728.2021.9414407},
   Abstract = {We introduce a two-stage decimation process to improve the
             performance of neural belief propagation (NBP), recently
             introduced by Nachmani et al., for short low-density
             parity-check (LDPC) codes. In the first stage, we build a
             list by iterating between a conventional NBP decoder and
             guessing the least reliable bit. The second stage iterates
             between a conventional NBP decoder and learned decimation,
             where we use a neural network to decide the decimation value
             for each bit. For a (128,64) LDPC code, the proposed NBP
             with decimation outperforms NBP decoding by 0.75 dB and
             performs within 1 dB from maximum-likelihood decoding at a
             block error rate of 10^-4.},
   Doi = {10.1109/ICASSP39728.2021.9414407},
   Key = {fds359252}
}

@article{fds319395,
   Author = {Hou, J and Smee, JE and Soriaga, JB and Chen, J and Pfister,
             HD},
   Title = {Link-level modeling and performance of CDMA interference
             cancellation},
   Journal = {Globecom Ieee Global Telecommunications Conference},
   Publisher = {IEEE},
   Year = {2006},
   Month = {December},
   ISBN = {142440357X},
   url = {http://dx.doi.org/10.1109/GLOCOM.2006.661},
   Abstract = {A general framework is provided to characterize the link
             level performance of CDMA systems with interference
             cancellation. This closed-form residual power analysis
             accounts for the impact of channel estimation errors due to
             SNR, channel variation, chip asynchronism, and filter
             mismatch. Simulations further quantify the link level
             cancellation performance on more realistic sub-chip
             multipath channels. This work demonstrates that properly
             designed channel estimation and signal reconstruction
             techniques achieve high cancellation efficiency over a
             variety of multipath fading channels. © 2006
             IEEE.},
   Doi = {10.1109/GLOCOM.2006.661},
   Key = {fds319395}
}

@article{fds319390,
   Author = {Fan, Z and Pfister, HD},
   Title = {List-message passing achieves capacity on the q-ary
             symmetric channel for large q},
   Journal = {Globecom Ieee Global Telecommunications Conference},
   Pages = {283-287},
   Publisher = {IEEE},
   Year = {2007},
   Month = {December},
   ISBN = {1424410436},
   url = {http://dx.doi.org/10.1109/GLOCOM.2007.60},
   Abstract = {We discuss and analyze a list-message-passing decoder with
             verification for low-density parity-check (LDPC) codes on
             the q-ary symmetric channel (q-SC). Rather than passing
             messages consisting of symbol probabilities, we pass lists
             of possible symbols and mark very likely symbols as
             verified. The density evolution (DE) equations for this
             decoder are derived and used to compute decoding thresholds.
             If the maximum list-size is unbounded, then we find that any
             capacity-achieving LDPC code for the binary erasure channel
             can be used to achieve capacity on the q-SC for large q. The
             decoding thresholds are also computed via DE for the case
             where each list is truncated to satisfy a maximum list-size
             constraint. The probability of false verification is
             considered for this case, and techniques are discussed to
             mitigate the problem. Optimization of the degree
             distribution is also used to improve the threshold for a
             fixed maximum list size. Finally, the proposed algorithm is
             compared with a variety of other algorithms using both
             density evolution thresholds and simulation results. © 2007
             IEEE.},
   Doi = {10.1109/GLOCOM.2007.60},
   Key = {fds319390}
}

@article{fds319394,
   Author = {Zhang, F and Pfister, HD},
   Title = {List-Message Passing Achieves Capacity on the q-ary
             Symmetric Channel for Large q.},
   Journal = {Globecom},
   Pages = {283-287},
   Publisher = {IEEE},
   Year = {2007},
   url = {http://dx.doi.org/10.1109/GLOCOM.2007.60},
   Doi = {10.1109/GLOCOM.2007.60},
   Key = {fds319394}
}

@article{fds349209,
   Author = {Rengaswamy, N and Calderbank, R and Kadhe, S and Pfister,
             HD},
   Title = {Logical Clifford Synthesis for Stabilizer
             Codes},
   Journal = {Ieee Transactions on Quantum Engineering},
   Volume = {1},
   Pages = {1-17},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2020},
   url = {http://dx.doi.org/10.1109/tqe.2020.3023419},
   Doi = {10.1109/tqe.2020.3023419},
   Key = {fds349209}
}

@article{fds343275,
   Author = {Kim, B-H and Yedla, A and Pfister, HD},
   Title = {Message-Passing Inference on a Factor Graph for
             Collaborative Filtering},
   Year = {2010},
   Month = {April},
   Abstract = {This paper introduces a novel message-passing (MP) framework
             for the collaborative filtering (CF) problem associated with
             recommender systems. We model the movie-rating prediction
             problem popularized by the Netflix Prize, using a
             probabilistic factor graph model and study the model by
             deriving generalization error bounds in terms of the
             training error. Based on the model, we develop a new MP
             algorithm, termed IMP, for learning the model. To show
             the superiority of the IMP algorithm, we compare it with the
             closely related expectation-maximization (EM) based
             algorithm and a number of other matrix completion
             algorithms. Our simulation results on Netflix data show
             that, while the methods perform similarly with large amounts
             of data, the IMP algorithm is superior for small amounts of
             data. This mitigates the cold-start problem of CF systems
             in practice. Another advantage of the IMP algorithm is that
             it can be analyzed using the technique of density evolution
             (DE) that was originally developed for MP decoding of
             error-correcting codes.},
   Key = {fds343275}
}

@article{fds342174,
   Author = {Schmidt, C and Pfister, HD and Zdeborová, L},
   Title = {Minimal sets to destroy the k-core in random
             networks.},
   Journal = {Physical Review. E},
   Volume = {99},
   Number = {2-1},
   Pages = {022310},
   Year = {2019},
   Month = {February},
   url = {http://dx.doi.org/10.1103/physreve.99.022310},
   Abstract = {We study the problem of finding the smallest set of nodes in
             a network whose removal results in an empty k-core, where
             the k-core is the subnetwork obtained after the iterative
             removal of all nodes of degree smaller than k. This problem
             is also known in the literature as finding the minimal
             contagious set. The main contribution of our work is an
             analysis of the performance of the recently introduced
             CoreHD algorithm [Zdeborová, Zhang, and Zhou, Sci. Rep. 6,
             37954 (2016)10.1038/srep37954] on random graphs taken from
             the configuration model via a set of deterministic
             differential equations. Our analyses provide upper bounds on
             the size of the minimal contagious set that improve over
             previously known bounds. Our second contribution is a
             heuristic called the weak-neighbor algorithm that
             outperforms all currently known local methods in the regimes
             considered.},
   Doi = {10.1103/physreve.99.022310},
   Key = {fds342174}
}

@article{fds333681,
   Author = {Häger, C and Pfister, HD},
   Title = {Miscorrection-free Decoding of Staircase
             Codes},
   Journal = {European Conference on Optical Communication,
             Ecoc},
   Volume = {2017-September},
   Pages = {1-3},
   Publisher = {IEEE},
   Year = {2017},
   Month = {September},
   url = {http://dx.doi.org/10.1109/ECOC.2017.8345919},
   Abstract = {We propose a novel decoding algorithm for staircase codes
             which reduces the effect of undetected component code
             miscorrections. The algorithm significantly improves
             performance, while retaining a low-complexity implementation
             suitable for high-speed optical transport
             networks.},
   Doi = {10.1109/ECOC.2017.8345919},
   Key = {fds333681}
}

@article{fds350484,
   Author = {Butler, RM and Hager, C and Pfister, HD and Liga, G and Alvarado,
             A},
   Title = {Model-Based Machine Learning for Joint Digital
             Backpropagation and PMD Compensation},
   Journal = {Journal of Lightwave Technology},
   Volume = {39},
   Number = {4},
   Pages = {949-959},
   Year = {2021},
   Month = {February},
   url = {http://dx.doi.org/10.1109/JLT.2020.3034047},
   Abstract = {In this article, we propose a model-based machine-learning
             approach for dual-polarization systems by parameterizing the
             split-step Fourier method for the Manakov-PMD equation. The
             resulting method combines hardware-friendly time-domain
             nonlinearity mitigation via the recently proposed learned
             digital backpropagation (LDBP) with distributed compensation
             of polarization-mode dispersion (PMD). We refer to the
             resulting approach as LDBP-PMD. We train LDBP-PMD on
             multiple PMD realizations and show that it converges within
             1% of its peak dB performance after 428 training iterations
             on average, yielding a peak effective signal-to-noise ratio
             of only 0.30 dB below the PMD-free case. Similar to
             state-of-the-art lumped PMD compensation algorithms in
             practical systems, our approach does not assume any
             knowledge about the particular PMD realization along the
             link, nor any knowledge about the total accumulated PMD.
             This is a significant improvement compared to prior work on
             distributed PMD compensation, where knowledge about the
             accumulated PMD is typically assumed. We also compare
             different parameterization choices in terms of performance,
             complexity, and convergence behavior. Lastly, we demonstrate
             that the learned models can be successfully retrained after
             an abrupt change of the PMD realization along the
             fiber.},
   Doi = {10.1109/JLT.2020.3034047},
   Key = {fds350484}
}

@article{fds352266,
   Author = {Häger, C and Pfister, HD and Bütler, RM and Liga, G and Alvarado,
             A},
   Title = {Model-based machine learning for joint digital
             backpropagation and PMD compensation},
   Journal = {Optics Infobase Conference Papers},
   Volume = {Part F174-OFC 2020},
   Year = {2020},
   Month = {January},
   ISBN = {9781943580712},
   url = {http://dx.doi.org/10.1364/OFC.2020.W3D.3},
   Abstract = {We propose a model-based machine-learning approach for
             polarization-multiplexed systems by parameterizing the
             split-step method for the Manakov-PMD equation. This
             approach performs hardware-friendly DBP and distributed PMD
             compensation with performance close to the PMD-free
             case.},
   Doi = {10.1364/OFC.2020.W3D.3},
   Key = {fds352266}
}

@article{fds319383,
   Author = {Zhang, F and Pfister, HD},
   Title = {Modulation codes for flash memory based on load-balancing
             theory},
   Journal = {2009 47th Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2009},
   Pages = {1039-1046},
   Publisher = {IEEE},
   Year = {2009},
   Month = {December},
   ISBN = {9781424458714},
   url = {http://dx.doi.org/10.1109/ALLERTON.2009.5394880},
   Abstract = {In this paper, we consider modulation codes for practical
             multilevel flash memory storage systems with q cell levels.
             Instead of maximizing the lifetime of the device [7], [1],
             [2], [4], we maximize the average amount of information
             stored per cell-level, which is defined as storage
             efficiency. Using this framework, we show that the
             worst-case criterion [7], [1], [2] and the average-case
             criterion [4] are two extreme cases of our objective
             function. A self-randomized modulation code is proposed
             which is asymptotically optimal, as q → ∞, for an
             arbitrary input alphabet and i.i.d. input distribution. In
             practical flash memory systems, the number of cell-levels q
             is only moderately large. So the asymptotic performance as q
             → ∞ may not tell the whole story. Using the tools from
             load-balancing theory, we analyze the storage efficiency of
             the self-randomized modulation code. The result shows that
             only a fraction of the cells are utilized when the number of
             cell-levels q is only moderately large. We also propose a
             load-balancing modulation code, based on a phenomenon known
             as "the power of two random choices" [10], to improve the
             storage efficiency of practical systems. Theoretical
             analysis and simulation results show that our load-balancing
             modulation codes can provide significant gain to practical
             flash memory storage systems. Though pseudo-random, our
             approach achieves the same load-balancing performance, for
             i.i.d. inputs, as a purely random approach based on the
             power of two random choices. ©2009 IEEE.},
   Doi = {10.1109/ALLERTON.2009.5394880},
   Key = {fds319383}
}
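
The "power of two random choices" effect that the abstract leans on is easy to reproduce in a few lines. The simulation below is a generic illustration with my own parameters, not the paper's modulation code; it compares the maximum cell load when each write picks one random cell versus the less-loaded of two random cells:

    import numpy as np

    rng = np.random.default_rng(0)
    n_cells, n_writes, trials = 1000, 1000, 200

    def max_load(two_choices):
        loads = np.zeros(n_cells, dtype=int)
        for _ in range(n_writes):
            if two_choices:
                a, b = rng.integers(n_cells, size=2)
                i = a if loads[a] <= loads[b] else b   # pick the emptier cell
            else:
                i = rng.integers(n_cells)
            loads[i] += 1
        return loads.max()

    one = np.mean([max_load(False) for _ in range(trials)])
    two = np.mean([max_load(True) for _ in range(trials)])
    print(f"avg max load: one choice {one:.2f}, two choices {two:.2f}")

With one choice the maximum load grows like log n / log log n; with two choices it drops to roughly log log n, which is the load-balancing gain the proposed modulation code exploits.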

@article{fds319410,
   Author = {Hou, J and Siegel, PH and Milstein, LB and Pfister,
             HD},
   Title = {Multilevel coding with low-density parity-check component
             codes},
   Journal = {Conference Record / Ieee Global Telecommunications
             Conference},
   Volume = {2},
   Pages = {1016-1020},
   Year = {2001},
   Month = {December},
   Abstract = {We design multilevel coding (MLC) schemes with low-density
             parity-check (LDPC) codes as component codes at each level.
             We develop a method to analyze the performance of an LDPC
             code at any level as the codeword length goes to infinity
             even if the equivalent binary-input component channels are
             not symmetric. By joint optimization of code rates and
             degree distributions, the optimized irregular LDPC codes at
             each level are capable of achieving reliable transmission at
             signal-to-noise ratios (SNR) very close to the capacity of
             the additive white Gaussian noise (AWGN) channel as the
             codeword length tends to infinity. Simulation results show
             that the optimized LDPC codes also perform very well at
             moderate codeword lengths.},
   Key = {fds319410}
}

@article{fds319326,
   Author = {Vem, A and Huang, YC and Narayanan, KR and Pfister,
             HD},
   Title = {Multilevel lattices based on spatially-coupled LDPC codes
             with applications},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Pages = {2336-2340},
   Publisher = {IEEE},
   Year = {2014},
   Month = {January},
   ISBN = {9781479951864},
   url = {http://dx.doi.org/10.1109/ISIT.2014.6875251},
   Abstract = {We propose a class of lattices constructed using
             Construction D where the underlying linear codes are nested
             binary spatially-coupled low-density parity-check
             (SC-LDPC) codes with uniform left and right degrees. By
             leveraging recent results on the optimality of
             spatially-coupled codes for binary input memoryless channels
             and Forney et al.'s earlier results on the optimality of
             construction D, we show that the proposed lattices achieve
             the Poltyrev limit under multistage belief propagation
             decoding. Lattice codes constructed from these lattices are
             shown to provide excellent performance for the three user
             symmetric interference channel. They can also be naturally
             used in applications such as integer-forcing and
             compute-and-forward. © 2014 IEEE.},
   Doi = {10.1109/ISIT.2014.6875251},
   Key = {fds319326}
}

@article{fds337696,
   Author = {Reeves, G and Pfister, HD and Dytso, A},
   Title = {Mutual Information as a Function of Matrix SNR for Linear
             Gaussian Channels},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2018-June},
   Pages = {1754-1758},
   Publisher = {IEEE},
   Year = {2018},
   Month = {August},
   ISBN = {9781538647806},
   url = {http://dx.doi.org/10.1109/ISIT.2018.8437326},
   Abstract = {This paper focuses on the mutual information and minimum
             mean-squared error (MMSE) as a function of a matrix-valued
             signal-to-noise ratio (SNR) for a linear Gaussian channel
             with arbitrary input distribution. As shown by Lamarca, the
             mutual-information is a concave function of a positive
             semidefinite matrix, which we call the matrix SNR. This
             implies that the mapping from the matrix SNR to the MMSE
             matrix is monotone decreasing. Building upon these
             functional properties, we start to construct a unifying
             framework that provides a bridge between classical
             information-theoretic inequalities, such as the entropy
             power inequality, and interpolation techniques used in
             statistical physics and random matrix theory. This framework
             provides new insight into the structure of phase transitions
             in coding theory and compressed sensing. In particular, it
             is shown that the parallel combination of linear channels
             with freely-independent matrices can be characterized
             succinctly via free convolution.},
   Doi = {10.1109/ISIT.2018.8437326},
   Key = {fds337696}
}

@article{fds346298,
   Author = {Pfister, HD and Urbanke, RL},
   Title = {Near-Optimal Finite-Length Scaling for Polar Codes Over
             Large Alphabets},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {65},
   Number = {9},
   Pages = {5643-5655},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2019},
   Month = {September},
   url = {http://dx.doi.org/10.1109/tit.2019.2915595},
   Doi = {10.1109/tit.2019.2915595},
   Key = {fds346298}
}

@article{fds322713,
   Author = {Pfister, HD and Urbanke, R},
   Title = {Near-optimal finite-length scaling for polar codes over
             large alphabets},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2016-August},
   Pages = {215-219},
   Publisher = {IEEE},
   Year = {2016},
   Month = {August},
   ISBN = {9781509018062},
   url = {http://dx.doi.org/10.1109/ISIT.2016.7541292},
   Abstract = {For any prime power q, Mori and Tanaka introduced a family
             of q-ary polar codes based on q by q Reed-Solomon
             polarization kernels. For transmission over a q-ary erasure
             channel, they also derived a closed-form recursion for the
             erasure probability of each effective channel. In this
             paper, we use that expression to analyze the finite-length
             scaling of these codes on the q-ary erasure channel with erasure
             probability ϵ ∈ (0, 1). Our primary result is that, for
             any γ > 0 and δ > 0, there is a q_0 such that, for all q
             ≥ q_0, the fraction of effective channels with erasure rate
             at most N^{-γ} is at least 1 - ϵ - O(N^{-1/2+δ}), where N = q^n
             is the blocklength. Since the gap to the channel capacity 1
             - ϵ cannot vanish faster than O(N^{-1/2}), this establishes
             near-optimal finite-length scaling for this family of codes.
             Our approach can be seen as an extension of a similar
             analysis for binary polar codes by Mondelli, Hassani, and
             Urbanke.},
   Doi = {10.1109/ISIT.2016.7541292},
   Key = {fds322713}
}

@article{fds319413,
   Author = {PFISTER, HL and PFISTER, HD},
   Title = {NONCONTACT BODY MEASUREMENT FOR AUTOMATED APPAREL
             MANUFACTURING},
   Journal = {Factory Automation and Information Management},
   Pages = {529-536},
   Publisher = {CRC PRESS INC},
   Editor = {AHMAD, MM and SULLIVAN, WG},
   Year = {1991},
   Month = {January},
   ISBN = {0-8493-4210-4},
   Key = {fds319413}
}

@article{fds336002,
   Author = {Hager, C and Pfister, HD},
   Title = {Nonlinear interference mitigation via deep neural
             networks},
   Journal = {2018 Optical Fiber Communications Conference and Exposition,
             Ofc 2018 Proceedings},
   Pages = {1-3},
   Year = {2018},
   Month = {June},
   ISBN = {9781943580385},
   Abstract = {A neural-network-based approach is presented to efficiently
             implement digital backpropagation (DBP). For a 32×100 km
             fiber-optic link, the resulting 'learned' DBP significantly
             reduces the complexity compared to conventional DBP
             implementations.},
   Key = {fds336002}
}

@article{fds336003,
   Author = {Häger, C and Pfister, HD},
   Title = {Nonlinear interference mitigation via deep neural
             networks},
   Journal = {Optics Infobase Conference Papers},
   Volume = {Part F84-OFC 2018},
   Publisher = {OSA},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.1364/OFC.2018.W3A.4},
   Abstract = {A neural-network-based approach is presented to efficiently
             implement digital backpropagation (DBP). For a 32×100 km
             fiber-optic link, the resulting “learned” DBP
             significantly reduces the complexity compared to
             conventional DBP implementations.},
   Doi = {10.1364/OFC.2018.W3A.4},
   Key = {fds336003}
}

@article{fds319409,
   Author = {Soriaga, JB and Pfister, HD and Siegel, PH},
   Title = {On Approaching the Capacity of Finite-State Intersymbol
             Interference Channels},
   Volume = {687},
   Pages = {365-378},
   Publisher = {Springer US},
   Editor = {Blaum, M and Farrell, PG and VanTilborg, HCA},
   Year = {2002},
   ISBN = {9781441952899},
   url = {http://dx.doi.org/10.1007/978-1-4757-3585-7_21},
   Doi = {10.1007/978-1-4757-3585-7_21},
   Key = {fds319409}
}

@article{fds342175,
   Author = {Sheikh, A and Graell i Amat, A and Liva, G and Häger, C and Pfister,
             HD},
   Title = {On Low-Complexity Decoding of Product Codes for
             High-Throughput Fiber-Optic Systems},
   Journal = {International Symposium on Turbo Codes and Iterative
             Information Processing, Istc},
   Volume = {2018-December},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1109/ISTC.2018.8625279},
   Abstract = {We study low-complexity iterative decoding algorithms for
             product codes. We revisit two algorithms recently proposed
             by the authors based on bounded distance decoding (BDD) of
             the component codes that improve the performance of
             conventional iterative BDD (iBDD). We then propose a novel
             decoding algorithm that is based on generalized minimum
             distance decoding of the component codes. The proposed
             algorithm closes over 50% of the performance gap between
             iBDD and turbo product decoding (TPD) based on the
             Chase-Pyndiah algorithm at a bit error rate of 10^-5.
             Moreover, the algorithm only leads to a limited increase in
             complexity with respect to iBDD and has significantly lower
             complexity than TPD. The studied algorithms are particularly
             interesting for high-throughput fiber-optic
             communications.},
   Doi = {10.1109/ISTC.2018.8625279},
   Key = {fds342175}
}

@article{fds319362,
   Author = {Nguyen, PS and Pfister, HD and Narayanan, KR},
   Title = {On multiple decoding attempts for Reed - Solomon codes: A
             rate-distortion approach},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {57},
   Number = {2},
   Pages = {668-691},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2011},
   Month = {February},
   url = {http://dx.doi.org/10.1109/TIT.2010.2095202},
   Abstract = {One popular approach to soft-decision decoding of Reed -
             Solomon (RS) codes is based on using multiple trials of a
             simple RS decoding algorithm in combination with erasing or
             flipping a set of symbols or bits in each trial. This paper
             presents a framework based on rate-distortion (RD) theory to
             analyze these multiple-decoding algorithms. By defining an
             appropriate distortion measure between an error pattern and
             an erasure pattern, the successful decoding condition, for a
             single errors-and-erasures decoding trial, becomes
             equivalent to distortion being less than a fixed threshold.
             Finding the best set of erasure patterns also turns into a
             covering problem that can be solved asymptotically by RD
             theory. Thus, the proposed approach can be used to
             understand the asymptotic performance-versus-complexity
             tradeoff of multiple errors-and-erasures decoding of RS
             codes. This initial result is also extended in a few
             directions. The rate-distortion exponent (RDE) is computed
             to give more precise results for moderate blocklengths.
             Multiple trials of algebraic soft-decision (ASD) decoding
             are analyzed using this framework. Analytical and numerical
             computations of the RD and RDE functions are also presented.
             Finally, simulation results show that sets of erasure
             patterns designed using the proposed methods outperform
             other algorithms with the same number of decoding trials. ©
             2006 IEEE.},
   Doi = {10.1109/TIT.2010.2095202},
   Key = {fds319362}
}

@article{fds319379,
   Author = {Nguyen, PS and Pfister, HD and Narayanan, KR},
   Title = {On Multiple Decoding Attempts for Reed-Solomon
             Codes},
   Volume = {abs/1005.4461},
   Year = {2010},
   Key = {fds319379}
}

@article{fds349210,
   Author = {Rengaswamy, N and Calderbank, R and Newman, M and Pfister,
             HD},
   Title = {On Optimality of CSS Codes for Transversal
             T},
   Journal = {Ieee Journal on Selected Areas in Information
             Theory},
   Volume = {1},
   Number = {2},
   Pages = {499-514},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2020},
   Month = {August},
   url = {http://dx.doi.org/10.1109/jsait.2020.3012914},
   Doi = {10.1109/jsait.2020.3012914},
   Key = {fds349210}
}

@article{fds319320,
   Author = {Hager, C and Amat, AGI and Pfister, HD and Alvarado, A and Brannstrom,
             F and Agrell, E},
   Title = {On parameter optimization for staircase codes},
   Journal = {Conference on Optical Fiber Communication, Technical Digest
             Series},
   Volume = {2015-June},
   Year = {2015},
   Month = {June},
   ISBN = {9781557529374},
   url = {http://dx.doi.org/10.1364/ofc.2015.th3e.3},
   Abstract = {We discuss the optimization of staircase code parameters
             based on density evolution. An extension of the original
             code construction is proposed, leading to codes with steeper
             waterfall performance.},
   Doi = {10.1364/ofc.2015.th3e.3},
   Key = {fds319320}
}

@article{fds359923,
   Author = {Häger, C and i Amat, AG and Pfister, HD and Alvarado, A and Brännström, F and Agrell, E},
   Title = {On parameter optimization for staircase codes},
   Journal = {Optical Fiber Communication Conference, Ofc
             2015},
   Year = {2015},
   Month = {March},
   ISBN = {9781557529374},
   Abstract = {We discuss the optimization of staircase code parameters
             based on density evolution. An extension of the original
             code construction is proposed, leading to codes with steeper
             waterfall performance.},
   Key = {fds359923}
}

@article{fds319319,
   Author = {Häger, C and i Amat, AG and Pfister, HD and Alvarado, A and Brännström, F and Agrell, E},
   Title = {On parameter optimization for staircase codes},
   Journal = {Optical Fiber Communication Conference, Ofc
             2015},
   Year = {2015},
   Month = {January},
   ISBN = {9781557529374},
   Abstract = {We discuss the optimization of staircase code parameters
             based on density evolution. An extension of the original
             code construction is proposed, leading to codes with steeper
             waterfall performance.},
   Key = {fds319319}
}

@article{fds319411,
   Author = {Pfister, HD and Soriaga, JB and Siegel, PH},
   Title = {On the achievable information rates of finite state ISI
             channels},
   Journal = {Conference Record / Ieee Global Telecommunications
             Conference},
   Volume = {5},
   Pages = {2992-2996},
   Year = {2001},
   Month = {December},
   Abstract = {In this paper, we present two simple Monte Carlo methods for
             estimating the achievable information rates of general
             finite state channels. Both methods require only the ability
             to simulate the channel with an a posteriori probability
             (APP) detector matched to the channel. The first method
             estimates the mutual information rate between the input
             random process and the output random process, provided that
             both processes are stationary and ergodic. When the inputs
             are i.i.d. equiprobable, this rate is known as the Symmetric
             Information Rate (SIR). The second method estimates the
             achievable information rate of an explicit coding system
             which interleaves m independent codes onto the channel and
             employs multistage decoding. For practical values of m,
             numerical results show that this system nearly achieves the
             SIR. Both methods are applied to the class of partial
             response channels commonly used in magnetic
             recording.},
   Key = {fds319411}
}
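
To make the first method in the abstract concrete, here is a minimal sketch with my own choice of channel and parameters: it estimates the SIR of the dicode partial-response channel y_k = x_k - x_{k-1} + n_k with i.i.d. equiprobable inputs x_k in {+1, -1} and Gaussian noise. A forward (APP-style) recursion computes log p(y_1^n), and by ergodicity -(1/n) log p(y_1^n) - h(noise) approaches the SIR for large n.

    import numpy as np

    def sir_dicode(sigma=0.8, n=100_000, seed=0):
        """Monte Carlo SIR estimate (bits/symbol) for the dicode channel
        y_k = x_k - x_{k-1} + n_k with x_k i.i.d. uniform on {+1, -1}."""
        rng = np.random.default_rng(seed)
        x = rng.choice([-1.0, 1.0], size=n + 1)        # x[0] is the initial state
        y = x[1:] - x[:-1] + sigma * rng.normal(size=n)

        states = np.array([-1.0, 1.0])                 # state = previous input
        alpha = np.array([0.5, 0.5])                   # prior on the initial state
        log_py = 0.0
        scale = 1.0 / (np.sqrt(2 * np.pi) * sigma)
        for k in range(n):
            # branch metric: P(x_k = s') * p(y_k | x_{k-1} = s, x_k = s')
            means = states[None, :] - states[:, None]  # entry (s, s') holds s' - s
            gamma = 0.5 * scale * np.exp(-(y[k] - means) ** 2 / (2 * sigma**2))
            alpha = alpha @ gamma                      # forward step (unnormalized)
            c = alpha.sum()                            # p(y_k | y_1 .. y_{k-1})
            log_py += np.log(c)
            alpha /= c
        h_y = -log_py / n                              # ≈ (1/n) h(Y^n), nats/symbol
        h_noise = 0.5 * np.log(2 * np.pi * np.e * sigma**2)
        return (h_y - h_noise) / np.log(2)

    print(f"estimated SIR: {sir_dicode():.3f} bits per channel use")

The same recipe applies to any finite-state channel for which an APP detector can be simulated, which is exactly the requirement stated in the abstract.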

@article{fds359251,
   Author = {Rengaswamy, N and Pfister, HD},
   Title = {On the Duality between the BSC and Quantum
             PSC},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2021-July},
   Pages = {2232-2237},
   Year = {2021},
   Month = {July},
   ISBN = {9781538682098},
   url = {http://dx.doi.org/10.1109/ISIT45174.2021.9518034},
   Abstract = {In 2018, Renes [IEEE Trans. Inf. Theory, vol. 64, no. 1, pp.
             577-592 (2018)] developed a general theory of channel
             duality for classical-input quantum-output channels. His
             result shows that a number of well-known duality results for
             linear codes on the binary erasure channel can be extended
             to general classical channels at the expense of using dual
             problems which are intrinsically quantum mechanical. One
             special case of this duality is a connection between coding
             for error correction on the quantum pure-state channel (PSC)
             and coding for wiretap secrecy on the classical binary
             symmetric channel (BSC). Similarly, coding for error
             correction on the BSC is related to wire-tap secrecy on the
             PSC. While this result has important implications for
             classical coding, the machinery behind the general duality
             result is rather challenging for researchers without a
             strong background in quantum information theory. In this
             work, we leverage prior results for linear codes on PSCs to
             give an alternate derivation of the aforementioned special
             case by computing closed-form expressions for the
             performance metrics. The noted prior results include the
             optimality of square-root measurement for linear codes on
             the PSC and the Fourier duality of linear
             codes.},
   Doi = {10.1109/ISIT45174.2021.9518034},
   Key = {fds359251}
}

@article{fds343276,
   Author = {Zhang, F and Pfister, HD},
   Title = {On the Iterative Decoding of High-Rate LDPC Codes With
             Applications in Compressed Sensing},
   Year = {2009},
   Month = {March},
   Abstract = {This paper considers the performance of $(j,k)$-regular
             low-density parity-check (LDPC) codes with message-passing
             (MP) decoding algorithms in the high-rate regime. In
             particular, we derive the high-rate scaling law for MP
             decoding of LDPC codes on the binary erasure channel (BEC)
             and the $q$-ary symmetric channel ($q$-SC). For the BEC, the
             density evolution (DE) threshold of iterative decoding
             scales like $\Theta(k^{-1})$ and the critical stopping ratio
             scales like $\Theta(k^{-j/(j-2)})$. For the $q$-SC, the DE
             threshold of verification decoding depends on the details of
             the decoder and scales like $\Theta(k^{-1})$ for one
             decoder. Using the fact that coding over large finite
             alphabets is very similar to coding over the real numbers,
             the analysis of verification decoding is also extended to
             the compressed sensing (CS) of strictly-sparse signals.
             A DE-based approach is used to analyze the CS systems with
             randomized-reconstruction guarantees. This leads to the
             result that strictly-sparse signals can be reconstructed
             efficiently with high probability using a constant
             oversampling ratio (i.e., when the number of measurements
             scales linearly with the sparsity of the signal). A
             stopping-set based approach is also used to get stronger
             (e.g., uniform-in-probability) reconstruction
             guarantees.},
   Key = {fds343276}
}
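
As a concrete illustration of the density-evolution analysis described in the
abstract above, the short Python sketch below (our own, with arbitrary
parameters) computes the BP threshold of a (j,k)-regular LDPC ensemble on the
BEC by bisection on the one-dimensional DE recursion; running it with j fixed
and k growing shows the shrinking threshold of the high-rate regime.

    def bp_threshold_bec(j, k, tol=1e-7):
        # Density evolution for a (j,k)-regular LDPC ensemble on the BEC:
        #   x_{t+1} = eps * (1 - (1 - x_t)^(k-1))^(j-1).
        # The BP threshold is the largest erasure probability eps for which
        # the recursion, started at x_0 = eps, converges to zero.
        def converges(eps, iters=20_000):
            x = eps
            for _ in range(iters):
                x_new = eps * (1.0 - (1.0 - x) ** (k - 1)) ** (j - 1)
                if x_new < 1e-12:
                    return True
                if abs(x_new - x) < 1e-15:     # stuck at a nonzero fixed point
                    return False
                x = x_new
            return x < 1e-12

        lo, hi = 0.0, 1.0                      # bisection on eps
        while hi - lo > tol:
            mid = 0.5 * (lo + hi)
            if converges(mid):
                lo = mid
            else:
                hi = mid
        return lo

    # (3,6)-regular gives roughly 0.4294; with j fixed and k growing, the
    # threshold shrinks roughly like 1/k, as discussed in the abstract.
    for j, k in [(3, 6), (3, 30), (3, 60)]:
        print(j, k, round(bp_threshold_bec(j, k), 4))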

@article{fds319372,
   Author = {Kim, BH and Pfister, HD},
   Title = {On the joint decoding of LDPC codes and finite-state
             channels via linear programming},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Pages = {754-758},
   Year = {2010},
   Month = {August},
   ISBN = {9781424469604},
   url = {http://dx.doi.org/10.1109/ISIT.2010.5513614},
   Abstract = {In this paper, the linear programming (LP) decoder for
             binary linear codes, introduced by Feldman, et al. is
             extended to joint-decoding of binary-input finite-state
             channels. In particular, we provide a rigorous definition of
             LP joint-decoding pseudo-codewords (JD-PCWs) that enables
             evaluation of the pairwise error probability between
             codewords and JD-PCWs. This leads naturally to a provable
             upper bound on decoder failure probability. If the channel
             is a finite-state intersymbol interference channel, then the
             LP joint decoder also has the maximum-likelihood (ML)
             certificate property and all integer-valued solutions are
             codewords. In this case, the performance loss relative to ML
             decoding can be explained completely by fractional-valued
             JD-PCWs. © 2010 IEEE.},
   Doi = {10.1109/ISIT.2010.5513614},
   Key = {fds319372}
}

@article{fds319376,
   Author = {Kim, B-H and Pfister, HD},
   Title = {On the joint decoding of LDPC codes and finite-state
             channels via linear programming.},
   Journal = {Isit},
   Pages = {754-758},
   Publisher = {IEEE},
   Year = {2010},
   url = {http://dx.doi.org/10.1109/ISIT.2010.5513614},
   Doi = {10.1109/ISIT.2010.5513614},
   Key = {fds319376}
}

@article{fds319316,
   Author = {Li, S and Huang, YC and Liu, T and Pfister, HD},
   Title = {On the limits of treating interference as noise for two-user
             symmetric Gaussian interference channels},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2015-June},
   Pages = {1711-1715},
   Publisher = {IEEE},
   Year = {2015},
   Month = {September},
   ISBN = {9781467377041},
   url = {http://dx.doi.org/10.1109/ISIT.2015.7282748},
   Abstract = {The limits of treating interference as noise are studied for
             the canonical two-user symmetric Gaussian interference
             channel. A two-step approach is proposed for finding
             approximately optimal input distributions in the high
             signal-to-noise ratio (SNR) regime. First, approximately and
             precisely optimal input distributions are found for the
             Avestimehr-Diggavi-Tse (ADT) linear deterministic model.
             These distributions are then translated, systematically,
             into Gaussian models, which we show can achieve the sum
             capacity to within O(log log(SNR)).},
   Doi = {10.1109/ISIT.2015.7282748},
   Key = {fds319316}
}

@article{fds319404,
   Author = {Soriaga, JB and Pfister, HD and Siegel, PH},
   Title = {On the low-rate shannon limit for binary intersymbol
             interference channels},
   Journal = {Ieee Transactions on Communications},
   Volume = {51},
   Number = {12},
   Pages = {1962-1964},
   Year = {2003},
   Month = {December},
   url = {http://dx.doi.org/10.1109/TCOMM.2003.820724},
   Abstract = {For a discrete-time, binary-input Gaussian channel with
             finite intersymbol interference, we prove that reliable
             communication can be achieved if and only if Eb/N0 > (log 2)/Gopt,
             for some constant Gopt that depends on the channel.
             To determine this constant, we consider the finite-state
             machine which represents the output sequences of the channel
             filter when driven by binary inputs. We then define Gopt as
             the maximum output power achieved by a simple cycle in this
             graph, and show that no other cycle or asymptotically long
             sequence can achieve an output power greater than this. We
             provide examples where the binary input constraint leads to
             a suboptimality, and other cases where binary signaling is
             just as effective as real signaling at very low
             signal-to-noise ratios.},
   Doi = {10.1109/TCOMM.2003.820724},
   Key = {fds319404}
}
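
The constant Gopt in the abstract above can be computed mechanically for short
channel filters. The Python sketch below is ours and only illustrative: it
builds the finite-state machine of channel outputs driven by binary inputs,
brute-forces the simple cycles with networkx, and returns the maximum average
squared output per symbol over a cycle, which is how we read "output power"
here; the dicode example at the end is an assumption-based check, not a quoted
result.

    import itertools
    import networkx as nx

    def g_opt(h):
        # States are the previous len(h)-1 binary inputs; each edge carries the
        # squared channel output, and Gopt is the maximum mean edge weight over
        # all simple cycles of this graph.
        nu = len(h) - 1
        G = nx.DiGraph()
        for state in itertools.product([-1, 1], repeat=nu):
            for u in (-1, 1):
                out = h[0] * u + sum(h[i] * state[i - 1] for i in range(1, nu + 1))
                nxt = (u,) + state[:-1]
                G.add_edge(state, nxt, w=out ** 2)
        best = 0.0
        for cyc in nx.simple_cycles(G):
            ws = [G[cyc[i]][cyc[(i + 1) % len(cyc)]]["w"] for i in range(len(cyc))]
            best = max(best, sum(ws) / len(ws))
        return best

    # Dicode channel h = [1, -1]: the highest-power cycle alternates inputs,
    # giving Gopt = 4 and, by the formula above, a low-rate limit of (log 2)/4.
    print(g_opt([1.0, -1.0]))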

@article{fds319350,
   Author = {Nguyen, PS and Yedla, A and Pfister, HD and Narayanan,
             KR},
   Title = {On the maximum a posteriori decoding thresholds of multiuser
             systems with erasures},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Pages = {2701-2705},
   Publisher = {IEEE},
   Year = {2012},
   Month = {October},
   ISBN = {9781467325790},
   url = {http://dx.doi.org/10.1109/ISIT.2012.6284013},
   Abstract = {A fundamental connection between the belief propagation (BP)
             and maximum a posteriori (MAP) decoding thresholds was
             derived by Méasson, Montanari, and Urbanke using the area
             theorem for extrinsic information transfer (EXIT) curves.
             This connection allows the MAP threshold, for the binary
             erasure channel, to be evaluated efficiently via an upper
             bound that can be shown to be tight in some cases. In this
             paper, a similar analysis is used to extend these results to
             several multiuser systems, namely a noisy Slepian-Wolf
             problem and a multiple-access channel with erasures. The
             simplicity of these channel models allows for rigorous
             analysis and enables the derivation of upper bounds on the
             MAP thresholds using EXIT area theorems. In some cases, one
             can also show these bounds are tight. One interesting
             application is that the MAP thresholds can be compared with
             the BP thresholds of spatially-coupled codes to verify
             threshold saturation for the corresponding systems. © 2012
             IEEE.},
   Doi = {10.1109/ISIT.2012.6284013},
   Key = {fds319350}
}

@article{fds319314,
   Author = {Hamidi-Sepehr, F and Chamberland, JF and Pfister,
             HD},
   Title = {On the Performance of Block Codes over Finite-State Channels
             in the Rare-Transition Regime},
   Journal = {Ieee Transactions on Communications},
   Volume = {63},
   Number = {11},
   Pages = {3974-3990},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2015},
   Month = {November},
   url = {http://dx.doi.org/10.1109/TCOMM.2015.2478794},
   Abstract = {Contemporary wireless networks are tasked with supporting
             different connection profiles, including real-time traffic
             and delay-sensitive communications. This creates a need to
             better understand the fundamental limits of forward error
             correction in non-asymptotic regimes. This paper
             characterizes the performance of block codes over
             finite-state channels and evaluates their queueing
             performance under maximum-likelihood decoding. Classical
             results from digital communications are revisited in the
             context of channels with rare transitions, and bounds on the
             probabilities of decoding failure are derived for random
             codes. This creates an analysis framework where channel
             dependencies within and across codewords are preserved.
             These results are subsequently integrated into a queueing
             problem formulation.},
   Doi = {10.1109/TCOMM.2015.2478794},
   Key = {fds319314}
}

@article{fds319345,
   Author = {Hamidi-Sepehr, F and Chamberland, JF and Pfister,
             HD},
   Title = {On the performance of random block codes over finite-state
             fading channels},
   Journal = {2012 50th Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2012},
   Pages = {17-25},
   Publisher = {IEEE},
   Year = {2012},
   Month = {December},
   ISBN = {9781467345385},
   url = {http://dx.doi.org/10.1109/Allerton.2012.6483194},
   Abstract = {As the mobile application landscape expands, wireless
             networks are tasked with supporting different connection
             profiles, including real-time traffic and delay-sensitive
             communications. Among many ensuing engineering challenges is
             the need to better understand the fundamental limits of
             forward error correction in non-asymptotic regimes. This
             article seeks to characterize the performance of block codes
             over finite-state channels with memory. In particular,
             classical results from information theory are revisited in
             the context of channels with rare transitions, and bounds on
             the probabilities of decoding failure are derived for random
             codes. This creates an analysis framework where channel
             dependencies within and across codewords are preserved. Such
             results can subsequently be integrated into a queueing
             problem formulation. Overall, this study offers new insights
             about the potential impact of channel correlation on the
             performance of delay-aware, point-to-point communication
             links. © 2012 IEEE.},
   Doi = {10.1109/Allerton.2012.6483194},
   Key = {fds319345}
}

@article{fds319353,
   Author = {Hamidi-Sepehr, F and Chamberland, J-F and Pfister,
             HD},
   Title = {On the performance of random block codes over finite-state
             fading channels.},
   Journal = {Allerton Conference},
   Pages = {17-25},
   Publisher = {IEEE},
   Year = {2012},
   ISBN = {978-1-4673-4537-8},
   url = {http://dx.doi.org/10.1109/Allerton.2012.6483194},
   Doi = {10.1109/Allerton.2012.6483194},
   Key = {fds319353}
}

@article{fds319348,
   Author = {Hamidi-Sepehr, F and Pfister, HD and Chamberland,
             JF},
   Title = {On the queueing behavior of Gilbert-Elliott channels in the
             rare-transition regime},
   Journal = {2012 46th Annual Conference on Information Sciences and
             Systems, Ciss 2012},
   Publisher = {IEEE},
   Year = {2012},
   Month = {November},
   ISBN = {9781467331401},
   url = {http://dx.doi.org/10.1109/CISS.2012.6310913},
   Abstract = {This article considers the performance of random block codes
             over the Gilbert-Elliott channel and characterizes the
             queueing performance under maximum-likelihood decoding. The
             probability of decoding failure is upper bounded using an
             approximation that works well in the rare-transition regime
             and the bound is used to perform a queueing analysis. A
             Poisson arrival process is chosen to allow fair comparisons
             between different block lengths and code rates. A Markov
             chain, based on the queue length and channel state, is
             constructed and used to analyze the tail probability of the
             queue. Our methods are used to evaluate both the probability
             of decoding failure, under a constraint of the probability
             of undetected error, and the queueing performance. The main
             result is that, for random coding on the Gilbert-Elliott
             channel, the performance analysis based on upper bounds
             provides a very good estimate of both the system performance
             and the optimum code parameters. © 2012
             IEEE.},
   Doi = {10.1109/CISS.2012.6310913},
   Key = {fds319348}
}
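
The joint queue/channel Markov chain described above can also be explored by
direct simulation. The Python sketch below is ours and all parameters are
placeholders rather than values from the paper: it draws Poisson arrivals in
each block interval, serves one head-of-line packet per block with a
decoding-failure probability that depends on the Gilbert-Elliott state, and
estimates the tail probability of the queue length.

    import numpy as np

    def queue_tail_estimate(lam=0.8, p_gb=0.05, p_bg=0.2, pf_good=1e-3,
                            pf_bad=0.5, n_blocks=500_000, thresh=20, seed=0):
        rng = np.random.default_rng(seed)
        q, bad = 0, False            # queue length and channel state (good/bad)
        exceed = 0
        for _ in range(n_blocks):
            q += rng.poisson(lam)                       # arrivals in this block
            if q > 0:                                   # attempt one transmission
                pf = pf_bad if bad else pf_good
                if rng.random() > pf:                   # decoding success
                    q -= 1
            # Gilbert-Elliott state transition
            if not bad and rng.random() < p_gb:
                bad = True
            elif bad and rng.random() < p_bg:
                bad = False
            exceed += (q > thresh)
        return exceed / n_blocks

    print("Estimated P(queue length > 20):", queue_tail_estimate())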

@article{fds319373,
   Author = {Parag, P and Chamberland, JF and Pfister, HD and Narayanan,
             KR},
   Title = {On the queueing behavior of random codes over a
             gilbert-elliot erasure channel},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Pages = {1798-1802},
   Publisher = {IEEE},
   Year = {2010},
   Month = {August},
   ISBN = {9781424469604},
   url = {http://dx.doi.org/10.1109/ISIT.2010.5513296},
   Abstract = {This paper considers the queueing performance of a system
             that transmits coded data over a time-varying erasure
             channel. In our model, the queue length and channel state
             together form a Markov chain that depends on the system
             parameters. This gives a framework that allows a rigorous
             analysis of the queue as a function of the code rate. Most
             prior work in this area either ignores block-length (e.g.,
             fluid models) or assumes error-free communication using
             finite codes. This work enables one to determine when such
             assumptions provide good, or bad, approximations of true
             behavior. Moreover, it offers a new approach to optimize
             parameters and evaluate performance. This can be valuable
             for delay-sensitive systems that employ short block lengths.
             © 2010 IEEE.},
   Doi = {10.1109/ISIT.2010.5513296},
   Key = {fds319373}
}

@article{fds319378,
   Author = {Parag, P and Chamberland, J-F and Pfister, HD and Narayanan,
             KR},
   Title = {On the queueing behavior of random codes over a
             gilbert-elliot erasure channel.},
   Journal = {Isit},
   Pages = {1798-1802},
   Publisher = {IEEE},
   Year = {2010},
   url = {http://dx.doi.org/10.1109/ISIT.2010.5513296},
   Doi = {10.1109/ISIT.2010.5513296},
   Key = {fds319378}
}

@article{fds319332,
   Author = {Pfister, HD and Vontobel, PO},
   Title = {On the relevance of graph covers and zeta functions for the
             analysis of SPA decoding of cycle codes},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Pages = {3000-3004},
   Publisher = {IEEE},
   Year = {2013},
   Month = {December},
   ISBN = {9781479904464},
   url = {http://dx.doi.org/10.1109/ISIT.2013.6620776},
   Abstract = {For an arbitrary binary cycle code, we show that sum-product
             algorithm (SPA) decoding after infinitely many iterations
             equals symbolwise graph-cover decoding. We do this by
             characterizing the Bethe free energy function of the
             underlying normal factor graph (NFG) and by stating a global
             convergence proof of the SPA. We also show that the set of
             log-likelihood ratio vectors for which the SPA converges to
             the all-zero codeword is given by the region of convergence
             of the edge zeta function associated with the underlying
             NFG. The results in this paper justify the use of
             graph-cover pseudo-codewords and edge zeta functions to
             characterize the behavior of SPA decoding of cycle codes.
             These results also have implications for the analysis of
             attenuated sum-product and max-product algorithm decoding of
             low-density parity-check (LDPC) codes beyond cycle codes. ©
             2013 IEEE.},
   Doi = {10.1109/ISIT.2013.6620776},
   Key = {fds319332}
}

@article{fds322723,
   Author = {Oberg, M and Pfister, HD and Siegel, PH},
   Title = {Parity-accumulate codes for magnetic recording},
   Journal = {Digests of the Intermag Conference},
   Year = {2000},
   Month = {January},
   Abstract = {A turbo-like architecture for partial-response channels is
             presented. The architecture is based on the serial
             concatenation of an outer single parity-check (SPC) code with
             uniformly-interleaved rate-1 codes.},
   Key = {fds322723}
}

@article{fds353338,
   Author = {Hager, C and Pfister, HD},
   Title = {Physics-Based Deep Learning for Fiber-Optic Communication
             Systems},
   Journal = {Ieee Journal on Selected Areas in Communications},
   Volume = {39},
   Number = {1},
   Pages = {280-294},
   Year = {2021},
   Month = {January},
   url = {http://dx.doi.org/10.1109/JSAC.2020.3036950},
   Abstract = {We propose a new machine-learning approach for fiber-optic
             communication systems whose signal propagation is governed
             by the nonlinear Schrödinger equation (NLSE). Our main
             observation is that the popular split-step method (SSM) for
             numerically solving the NLSE has essentially the same
             functional form as a deep multi-layer neural network; in
             both cases, one alternates linear steps and pointwise
             nonlinearities. We exploit this connection by parameterizing
             the SSM and viewing the linear steps as general linear
             functions, similar to the weight matrices in a neural
             network. The resulting physics-based machine-learning model
             has several advantages over 'black-box' function
             approximators. For example, it allows us to examine and
             interpret the learned solutions in order to understand why
             they perform well. As an application, low-complexity
             nonlinear equalization is considered, where the task is to
             efficiently invert the NLSE. This is commonly referred to as
             digital backpropagation (DBP). Rather than employing neural
             networks, the proposed algorithm, dubbed learned DBP (LDBP),
             uses the physics-based model with trainable filters in each
             step and its complexity is reduced by progressively pruning
             filter taps during gradient descent. Our main finding is
             that the filters can be pruned to remarkably short lengths -
             as few as 3 taps/step - without sacrificing performance. As
             a result, the complexity can be reduced by orders of
             magnitude in comparison to prior work. By inspecting the
             filter responses, an additional theoretical justification
             for the learned parameter configurations is provided. Our
             work illustrates that combining data-driven optimization
             with existing domain knowledge can generate new insights
             into old communications problems.},
   Doi = {10.1109/JSAC.2020.3036950},
   Key = {fds353338}
}
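
The connection between the split-step method and a layered network can be made
explicit with a few lines of code. The Python sketch below is illustrative
only: the parameter values are generic fiber numbers and the dispersion filter
is fixed rather than trained as in LDBP. It alternates a frequency-domain
linear step with a pointwise Kerr nonlinearity, which is exactly the
linear-layer / pointwise-nonlinearity structure the paper exploits.

    import numpy as np

    def split_step_nlse(u, dt, dz, n_steps, beta2=-21.7e-27, gamma=1.3e-3):
        # Split-step method for the NLSE (one common sign convention): each
        # step applies a linear all-pass dispersion filter in the frequency
        # domain followed by a pointwise Kerr phase rotation.
        omega = 2.0 * np.pi * np.fft.fftfreq(u.size, d=dt)
        H = np.exp(0.5j * beta2 * omega ** 2 * dz)            # linear "layer"
        for _ in range(n_steps):
            u = np.fft.ifft(H * np.fft.fft(u))                # linear step
            u = u * np.exp(1j * gamma * np.abs(u) ** 2 * dz)  # pointwise nonlinearity
        return u

    # Example: a 50 ps Gaussian pulse propagated over 10 steps of 1 km each.
    t = np.arange(-2048, 2048) * 1e-12
    u0 = np.exp(-t ** 2 / (2 * (50e-12) ** 2)).astype(complex)
    u1 = split_step_nlse(u0, dt=1e-12, dz=1e3, n_steps=10)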

@article{fds355460,
   Author = {Pfister, HD and Tal, I},
   Title = {Polar Codes for Channels with Insertions, Deletions, and
             Substitutions},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2021-July},
   Pages = {2554-2559},
   Year = {2021},
   Month = {July},
   url = {http://dx.doi.org/10.1109/ISIT45174.2021.9517755},
   Abstract = {This paper presents a coding scheme for an insertion
             deletion substitution channel. We extend a previous scheme
             for the deletion channel where polar codes are modified by
             adding 'guard bands' between segments. In the new scheme,
             each guard band consists of a middle segment of '1'
             symbols, and left and right segments of '0' symbols. Our
             coding scheme allows for a regular hidden-Markov input
             distribution, and achieves the information rate between the
             input and corresponding output of such a distribution. Thus,
             we prove that our scheme can be used to efficiently achieve
             the capacity of the channel. The probability of error of our
             scheme decays exponentially in the cube-root of the block
             length.},
   Doi = {10.1109/ISIT45174.2021.9517755},
   Key = {fds355460}
}
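
The guard-band construction described in the abstract is simple to state in
code. The Python sketch below is ours; the segment contents and run lengths are
placeholders, not the values required by the analysis. It inserts a '0'-run, a
middle '1'-run, and another '0'-run between consecutive codeword segments.

    def insert_guard_bands(segments, n_zero=16, n_one=16):
        # Guard band = left run of '0's, middle run of '1's, right run of '0's,
        # placed between consecutive polar-codeword segments.
        guard = [0] * n_zero + [1] * n_one + [0] * n_zero
        out = []
        for i, seg in enumerate(segments):
            out.extend(seg)
            if i + 1 < len(segments):
                out.extend(guard)
        return out

    # Example with three short illustrative segments.
    tx = insert_guard_bands([[1, 0, 1, 1], [0, 0, 1, 0], [1, 1, 0, 1]])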

@article{fds361239,
   Author = {Tal, I and Pfister, HD and Fazeli, A and Vardy, A},
   Title = {Polar Codes for the Deletion Channel: Weak and Strong
             Polarization},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {68},
   Number = {4},
   Pages = {2239-2265},
   Year = {2022},
   Month = {April},
   url = {http://dx.doi.org/10.1109/TIT.2021.3136440},
   Abstract = {This paper presents the first proof of polarization for the
             deletion channel with a constant deletion rate and a regular
             hidden-Markov input distribution. A key part of this work
             involves representing the deletion channel using a trellis
             and describing the plus and minus polar-decoding operations
             on that trellis. In particular, the plus and minus
             operations can be seen as combining adjacent trellis stages
             to yield a new trellis with half as many stages. Using this
             viewpoint, we prove a weak polarization theorem for standard
             polar codes on the deletion channel. To achieve strong
             polarization, we modify this scheme by adding guard bands of
             repeated zeros between various parts of the codeword. This
             gives a scheme whose rate approaches the mutual information
             and whose probability of error decays exponentially in the
             cube-root of the block length. We conclude by showing that
             this scheme can achieve capacity on the deletion channel by
             proving that the capacity of the deletion channel can be
             achieved by a sequence of regular hidden-Markov input
             distributions.},
   Doi = {10.1109/TIT.2021.3136440},
   Key = {fds361239}
}

@article{fds346726,
   Author = {Tal, I and Pfister, HD and Fazeli, A and Vardy, A},
   Title = {Polar Codes for the Deletion Channel: Weak and Strong
             Polarization},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2019-July},
   Pages = {1362-1366},
   Year = {2019},
   Month = {July},
   url = {http://dx.doi.org/10.1109/ISIT.2019.8849705},
   Abstract = {This paper presents the first proof of polarization for the
             deletion channel with a constant deletion rate and a regular
             hidden-Markov input distribution. A key part of this work
             involves representing the deletion channel using a trellis
             and describing the plus and minus polar-decoding operations
             on this trellis. In particular, the plus and minus
             operations can be seen as combining adjacent trellis stages
             to yield a new trellis with half as many stages. Using this
             viewpoint, we prove a weak polarization theorem for standard
             polar codes on the deletion channel. To achieve strong
             polarization, we modify this scheme by adding guard bands of
             repeated zeros between various parts of the codeword. Using
             this approach, we obtain a scheme whose rate approaches the
             mutual information and whose probability of error decays
             exponentially in the cube-root of the block
             length.},
   Doi = {10.1109/ISIT.2019.8849705},
   Key = {fds346726}
}

@article{fds354307,
   Author = {Buchberger, A and Hager, C and Pfister, HD and Schmalen, L and Graell i
             Amat, A},
   Title = {Pruning and Quantizing Neural Belief Propagation
             Decoders},
   Journal = {Ieee Journal on Selected Areas in Communications},
   Volume = {39},
   Number = {7},
   Pages = {1957-1966},
   Year = {2021},
   Month = {July},
   url = {http://dx.doi.org/10.1109/JSAC.2020.3041392},
   Abstract = {We consider near maximum-likelihood (ML) decoding of short
             linear block codes. In particular, we propose a novel
             decoding approach based on neural belief propagation (NBP)
             decoding recently introduced by Nachmani et al. in which we
             allow a different parity-check matrix in each iteration of
             the algorithm. The key idea is to consider NBP decoding over
             an overcomplete parity-check matrix and use the weights of
             NBP as a measure of the importance of the check nodes (CNs)
             to decoding. The unimportant CNs are then pruned. In
             contrast to NBP, which performs decoding on a given fixed
             parity-check matrix, the proposed pruning-based neural
             belief propagation (PB-NBP) typically results in a different
             parity-check matrix in each iteration. For a given
             complexity in terms of CN evaluations, we show that PB-NBP
             yields significant performance improvements with respect to
             NBP. We apply the proposed decoder to the decoding of a
             Reed-Muller code, a short low-density parity-check (LDPC)
             code, and a polar code. PB-NBP outperforms NBP decoding over
             an overcomplete parity-check matrix by 0.27-0.31 dB while
             reducing the number of required CN evaluations by up to 97%.
             For the LDPC code, PB-NBP outperforms conventional belief
             propagation with the same number of CN evaluations by 0.52
             dB. We further extend the pruning concept to offset min-sum
             decoding and introduce a pruning-based neural offset min-sum
             (PB-NOMS) decoder, for which we jointly optimize the offsets
             and the quantization of the messages and offsets. We
             demonstrate performance 0.5 dB from ML decoding with 5-bit
             quantization for the Reed-Muller code.},
   Doi = {10.1109/JSAC.2020.3041392},
   Key = {fds354307}
}

@article{fds352260,
   Author = {Buchberger, A and Hager, C and Pfister, HD and Schmalen, L and Graell i
             Amat, A},
   Title = {Pruning Neural Belief Propagation Decoders},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2020-June},
   Pages = {338-342},
   Year = {2020},
   Month = {June},
   url = {http://dx.doi.org/10.1109/ISIT44484.2020.9174097},
   Abstract = {We consider near maximum-likelihood (ML) decoding of short
             linear block codes based on neural belief propagation (BP)
             decoding recently introduced by Nachmani et al.. While this
             method significantly outperforms conventional BP decoding,
             the underlying parity-check matrix may still limit the
             overall performance. In this paper, we introduce a method to
             tailor an overcomplete parity-check matrix to (neural) BP
             decoding using machine learning. We consider the weights in
             the Tanner graph as an indication of the importance of the
             connected check nodes (CNs) to decoding and use them to
             prune unimportant CNs. As the pruning is not tied across
             iterations, the final decoder uses a different parity-check
             matrix in each iteration. For Reed-Muller and short
             low-density parity-check codes, we achieve performance
             within 0.27 dB and 1.5 dB of the ML performance while reducing
             the complexity of the decoder.},
   Doi = {10.1109/ISIT44484.2020.9174097},
   Key = {fds352260}
}

@article{fds352262,
   Author = {Rengaswamy, N and Seshadreesan, KP and Guha, S and Pfister,
             HD},
   Title = {Quantum Advantage via Qubit Belief Propagation},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2020-June},
   Pages = {1824-1829},
   Year = {2020},
   Month = {June},
   ISBN = {9781728164328},
   url = {http://dx.doi.org/10.1109/ISIT44484.2020.9174494},
   Abstract = {Quantum technologies are maturing by the day and their
             near-term applications are now of great interest. Deep-space
             optical communication involves transmission over the
             pure-state classical-quantum channel. For optimal detection,
             a joint measurement on all output qubits is required in
             general. Since this is hard to realize, current
             (sub-optimal) schemes perform symbol-by-symbol detection
             followed by classical post-processing. In this paper we
             focus on a recently proposed belief propagation algorithm by
             Renes that passes qubit messages on the factor graph of a
             classical error-correcting code. More importantly, it only
             involves single-qubit Pauli measurements during the process.
             For an example 5-bit code, we analyze the involved density
             matrices and calculate the error probabilities on this
             channel. Then we numerically compute the optimal joint
             detection limit using the Yuen-Kennedy-Lax conditions and
             demonstrate that the calculated error probabilities for this
             algorithm appear to achieve this limit. This represents a
             first step towards achieving a quantum communication
             advantage. We verify our analysis using Monte Carlo
             simulations.},
   Doi = {10.1109/ISIT44484.2020.9174494},
   Key = {fds352262}
}

@article{fds319359,
   Author = {Hamidi-Sepehr, F and Cai, Y and Pfister, HD and Chamberland,
             JF},
   Title = {Queueing behavior of the Gilbert-Elliott channel: BCH codes
             and Poisson arrivals},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Pages = {1806-1810},
   Publisher = {IEEE},
   Year = {2011},
   Month = {October},
   ISBN = {9781457705953},
   url = {http://dx.doi.org/10.1109/ISIT.2011.6033861},
   Abstract = {This paper considers the queueing performance of a
             communication system that transmits BCH-coded data over the
             correlated-error channel first studied by Gilbert and
             Elliott in the 1960s. For some arrival processes, one can
             join the queue length and channel state so that the pair
             forms a Markov chain; this provides a powerful tool to
             analyze the tail probability of the queue. For Bernoulli
             packet arrivals, this approach works but does not allow for
             fair comparisons between different block-length codes. In
             this paper, a Poisson arrival model is assumed in order to
             make fair comparisons between codes with arbitrary block
             length and code rate. This enables one to optimize code
             parameters for delay-sensitive communication systems over
             time-varying channels. Finally, the analysis is supported
             through a Monte Carlo simulation. © 2011
             IEEE.},
   Doi = {10.1109/ISIT.2011.6033861},
   Key = {fds319359}
}

@article{fds319396,
   Author = {Sason, I and Pfister, HD},
   Title = {Recent results on capacity-achieving codes for the erasure
             channel with bounded complexity},
   Journal = {Ieee Convention of Electrical and Electronics Engineers in
             Israel, Proceedings},
   Pages = {339-343},
   Publisher = {IEEE},
   Year = {2006},
   Month = {December},
   ISBN = {1424402301},
   url = {http://dx.doi.org/10.1109/EEEI.2006.321096},
   Abstract = {The paper introduces ensembles of accumulate-repeat-accumulate
             (ARA) codes which asymptotically achieve capacity on the
             binary erasure channel (BEC) with bounded complexity (per
             information bit). It also introduces symmetry properties
             which play a central role in the construction of various
             capacity-achieving ensembles for the BEC. The results
             improve on the tradeoff between performance and complexity
             provided by the first capacity-achieving ensembles of
             irregular repeat-accumulate (IRA) codes with bounded
             complexity (constructed by Pfister, Sason and Urbanke). The
             superiority of ARA codes with moderate to large block
             lengths is exemplified by computer simulations comparing
             their performance with those of previously reported
             capacity-achieving ensembles of LDPC and IRA codes. ARA
             codes also have the advantage of being systematic. © 2006
             IEEE.},
   Doi = {10.1109/EEEI.2006.321096},
   Key = {fds319396}
}

@article{fds319398,
   Author = {Sason, I and Pfister, HD},
   Title = {Recent results on capacity-achieving codes for the erasure
             channel with bounded complexity},
   Journal = {2006 Ieee 24th Convention of Electrical & Electronics
             Engineers in Israel},
   Pages = {343+},
   Publisher = {IEEE},
   Year = {2006},
   Month = {January},
   ISBN = {978-1-4244-0229-8},
   Key = {fds319398}
}

@article{fds323532,
   Author = {Reeves, G and Pfister, HD},
   Title = {Reed-Muller Codes Achieve Capacity on BMS
             Channels},
   Pages = {658-669},
   Publisher = {ACM},
   Editor = {Wichs, D and Mansour, Y},
   Year = {2021},
   Month = {October},
   ISBN = {978-1-4503-4132-5},
   url = {http://dx.doi.org/10.1145/2897518.2897584},
   Abstract = {This paper considers the performance of long Reed-Muller
             (RM) codes transmitted over binary memoryless symmetric
             (BMS) channels under bitwise maximum-a-posteriori decoding.
             Its main result is that the family of binary RM codes
             achieves capacity on any BMS channel with respect to
             bit-error rate. This resolves a long-standing open problem
             that connects information theory and error-correcting codes.
             In contrast with the earlier result for the binary erasure
             channel, the new proof does not rely on hypercontractivity.
             Instead, it combines a nesting property of RM codes with new
             information inequalities relating the generalized extrinsic
             information transfer function and the extrinsic minimum
             mean-squared error.},
   Doi = {10.1145/2897518.2897584},
   Key = {fds323532}
}

@article{fds326795,
   Author = {Kudekar, S and Kumar, S and Mondelli, M and Pfister, HD and Şaşoğlu, E and Urbanke, RL},
   Title = {Reed-muller codes achieve capacity on erasure
             channels},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {63},
   Number = {7},
   Pages = {4298-4316},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2017},
   Month = {July},
   url = {http://dx.doi.org/10.1109/TIT.2017.2673829},
   Abstract = {We introduce a new approach to proving that a sequence of
             deterministic linear codes achieves capacity on an erasure
             channel under maximum a posteriori decoding. Rather than
             relying on the precise structure of the codes, our method
             exploits code symmetry. In particular, the technique applies
             to any sequence of linear codes where the blocklengths are
             strictly increasing, the code rates converge, and the
             permutation group of each code is doubly transitive. In
             other words, we show that symmetry alone implies
             near-optimal performance. An important consequence of this
             result is that a sequence of Reed-Muller codes with
             increasing blocklength and converging rate achieves
             capacity. This possibility has been suggested previously in
             the literature but it has only been proven for cases where
             the limiting code rate is 0 or 1. Moreover, these results
             extend naturally to all affine-invariant codes and, thus, to
             extended primitive narrow-sense BCH codes. This also
             resolves, in the affirmative, the existence question for
             capacity-achieving sequences of binary cyclic codes. The
             primary tools used in the proof are the sharp threshold
             property for symmetric monotone Boolean functions and the
             area theorem for extrinsic information transfer
             functions.},
   Doi = {10.1109/TIT.2017.2673829},
   Key = {fds326795}
}
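
The area theorem mentioned at the end of the abstract above is short enough to
state. A standard form of it, in our notation rather than a quote from the
paper, reads: for a binary linear code of length n and rate R used over BEC(ε)
under bitwise MAP decoding, the average EXIT function satisfies

    \[
      h(\varepsilon) \;=\; \frac{1}{n}\sum_{i=1}^{n}
        H\!\left(X_i \,\middle|\, Y_{\sim i}(\varepsilon)\right),
      \qquad
      \int_{0}^{1} h(\varepsilon)\,\mathrm{d}\varepsilon \;=\; R,
    \]

where Y_{~i}(ε) denotes all channel outputs except the i-th.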

@article{fds319313,
   Author = {Kudekar, S and Pfister, HD and Kumar, S and Şaşoğlu, E and Mondelli,
             M and Urbanke, R},
   Title = {Reed-Muller codes achieve capacity on erasure
             channels},
   Journal = {Proceedings of the Annual Acm Symposium on Theory of
             Computing},
   Volume = {19-21-June-2016},
   Pages = {658-669},
   Publisher = {ACM Press},
   Year = {2016},
   Month = {June},
   ISBN = {9781450341325},
   url = {http://dx.doi.org/10.1145/2897518.2897584},
   Abstract = {We introduce a new approach to proving that a sequence of
             deterministic linear codes achieves capacity on an erasure
             channel under maximum a posteriori decoding. Rather than
             relying on the precise structure of the codes, our method
             exploits code symmetry. In particular, the technique applies
             to any sequence of linear codes where the block lengths are
             strictly increasing, the code rates converge, and the
             permutation group of each code is doubly transitive. In a
             nutshell, we show that symmetry alone implies near-optimal
             performance. An important consequence of this result is that
             a sequence of Reed-Muller codes with increasing block length
             and converging rate achieves capacity. This possibility has
             been suggested previously in the literature, but it has only
             been proven for cases where the limiting code rate is 0 or
             1. Moreover, these results extend naturally to
             affine-invariant codes and, thus, to all extended primitive
             narrow-sense BCH codes. This is used to resolve, in the
             affirmative, the existence question for capacity-achieving
             sequences of binary cyclic codes. The primary tools used in
             the proofs are the sharp threshold property for symmetric
             monotone boolean functions and the area theorem for
             extrinsic information transfer (EXIT) functions.},
   Doi = {10.1145/2897518.2897584},
   Key = {fds319313}
}

@article{fds319311,
   Author = {Kumar, S and Calderbank, R and Pfister, HD},
   Title = {Reed-muller codes achieve capacity on the quantum erasure
             channel},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2016-August},
   Pages = {1750-1754},
   Publisher = {IEEE},
   Year = {2016},
   Month = {August},
   ISBN = {9781509018062},
   url = {http://dx.doi.org/10.1109/ISIT.2016.7541599},
   Abstract = {The quantum erasure channel is the simplest example of a
             quantum communication channel and its information capacity
             is known precisely. The subclass of quantum error-correcting
             codes called stabilizer codes is known to contain
             capacity-achieving sequences for the quantum erasure
             channel, but no efficient method is known to construct these
             sequences. In this article, we explicitly describe a
             capacity-achieving code sequence for the quantum erasure
             channel. In particular, we show that Calderbank-Shor-Steane
             (CSS) stabilizer codes constructed from self-orthogonal
             binary linear codes are capacity-achieving on the quantum
             erasure channel if the binary linear codes are
             capacity-achieving on the binary erasure channel. Recently,
             Reed-Muller codes were shown to achieve capacity on
             classical erasure channels. Using this, we show that CSS
             codes constructed from binary Reed-Muller codes achieve the
             capacity of the quantum erasure channel. The
             capacity-achieving nature of these CSS codes is also
             explained from a GF(4) perspective.},
   Doi = {10.1109/ISIT.2016.7541599},
   Key = {fds319311}
}

@article{fds347303,
   Author = {Carpi, F and Hager, C and Martalo, M and Raheli, R and Pfister,
             HD},
   Title = {Reinforcement Learning for Channel Coding: Learned
             Bit-Flipping Decoding},
   Journal = {2019 57th Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2019},
   Pages = {922-929},
   Publisher = {IEEE},
   Year = {2019},
   Month = {September},
   url = {http://dx.doi.org/10.1109/ALLERTON.2019.8919799},
   Abstract = {In this paper, we use reinforcement learning to find
             effective decoding strategies for binary linear codes. We
             start by reviewing several iterative decoding algorithms
             that involve a decision-making process at each step,
             including bit-flipping (BF) decoding, residual belief
             propagation, and anchor decoding. We then illustrate how
             such algorithms can be mapped to Markov decision processes
             allowing for data-driven learning of optimal decision
             strategies, rather than basing decisions on heuristics or
             intuition. As a case study, we consider BF decoding for both
             the binary symmetric and additive white Gaussian noise
             channel. Our results show that learned BF decoders can offer
             a range of performance-complexity trade-offs for the
             considered Reed-Muller and BCH codes, and achieve
             near-optimal performance in some cases. We also demonstrate
             learning convergence speed-ups when biasing the learning
             process towards correct decoding decisions, as opposed to
             relying only on random explorations and past
             knowledge.},
   Doi = {10.1109/ALLERTON.2019.8919799},
   Key = {fds347303}
}

@article{fds363132,
   Author = {Brandsen, S and Stubbs, KD and Pfister, HD},
   Title = {Reinforcement Learning with Neural Networks for Quantum
             Multiple Hypothesis Testing},
   Journal = {Quantum},
   Volume = {6},
   Year = {2022},
   Month = {January},
   url = {http://dx.doi.org/10.22331/Q-2022-01-26-633},
   Abstract = {Reinforcement learning with neural networks (RLNN) has
             recently demonstrated great promise for many problems,
             including some problems in quantum information theory. In
             this work, we apply RLNN to quantum hypothesis testing and
             determine the optimal measurement strategy for
             distinguishing between multiple quantum states {ρj} while
             minimizing the error probability. In the case where the
             candidate states correspond to a quantum system with many
             qubit subsystems, implementing the optimal measurement on
             the entire system is experimentally infeasible. We use RLNN
             to find locally-adaptive measurement strategies that are
             experimentally feasible, where only one quantum subsystem is
             measured in each round. We provide numerical results which
             demonstrate that RLNN successfully finds the optimal local
             approach, even for candidate states with up to 20 subsystems. We
             additionally demonstrate that the RLNN strategy meets or
             exceeds the success probability for a modified locally
             greedy approach in each random trial. While the use of RLNN
             is highly successful for designing adaptive local
             measurement strategies, in general a significant gap can
             exist between the success probability of the optimal
             locally-adaptive measurement strategy and the optimal
             collective measurement. We build on previous work to provide
             a set of necessary and sufficient conditions for collective
             protocols to strictly outperform locally adaptive protocols.
             We also provide a new example which, to our knowledge, is
             the simplest known state set exhibiting a significant gap
             between local and collective protocols. This result raises
             interesting new questions about the gap between
             theoretically optimal measurement strategies and practically
             implementable measurement strategies.},
   Doi = {10.22331/Q-2022-01-26-633},
   Key = {fds363132}
}

@article{fds352264,
   Author = {Brandsen, S and Stubbs, KD and Pfister, HD},
   Title = {Reinforcement Learning with Neural Networks for Quantum
             Multiple Hypothesis Testing},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2020-June},
   Pages = {1897-1902},
   Year = {2020},
   Month = {June},
   ISBN = {9781728164328},
   url = {http://dx.doi.org/10.1109/ISIT44484.2020.9174150},
   Abstract = {Reinforcement learning with neural networks (RLNN) has
             recently demonstrated great promise for many problems,
             including some problems in quantum information theory. In
             this work, we apply reinforcement learning to quantum
             hypothesis testing, where one designs measurements that can
             distinguish between multiple quantum states {ρj} while
             minimizing the error probability. Although the Helstrom
             measurement is known to be optimal when there are m=2
             states, the general problem of finding a minimal-error
             measurement is challenging. Additionally, in the case where
             the candidate states correspond to a quantum system with
             many qubit subsystems, implementing the optimal measurement
             on the entire system may be impractical. In this work, we
             develop locally-adaptive measurement strategies that are
             experimentally feasible in the sense that only one quantum
             subsystem is measured in each round. RLNN is used to find
             the optimal measurement protocol for arbitrary sets of
             tensor product quantum states. Numerical results for the
             network performance are shown. In special cases, the neural
             network testing-policy achieves the same probability of
             success as the optimal collective measurement.},
   Doi = {10.1109/ISIT44484.2020.9174150},
   Key = {fds352264}
}

@article{fds349917,
   Author = {Oliari, V and Goossens, S and Hager, C and Liga, G and Butler, RM and Hout,
             MVD and Heide, SVD and Pfister, HD and Okonkwo, C and Alvarado,
             A},
   Title = {Revisiting Efficient Multi-Step Nonlinearity Compensation
             with Machine Learning: An Experimental Demonstration},
   Journal = {Journal of Lightwave Technology},
   Volume = {38},
   Number = {12},
   Pages = {3114-3124},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2020},
   Month = {June},
   url = {http://dx.doi.org/10.1109/JLT.2020.2994220},
   Abstract = {Efficient nonlinearity compensation in fiber-optic
             communication systems is considered a key element to go
             beyond the 'capacity crunch'. One guiding principle for
             previous work on the design of practical nonlinearity
             compensation schemes is that fewer steps lead to better
             systems. In this paper, we challenge this assumption and
             show how to carefully design multi-step approaches that
             provide better performance-complexity trade-offs than their
             few-step counterparts. We consider the recently proposed
             learned digital backpropagation (LDBP) approach, where the
             linear steps in the split-step method are re-interpreted as
             general linear functions, similar to the weight matrices in
             a deep neural network. Our main contribution lies in an
             experimental demonstration of this approach for a 25 Gbaud
             single-channel optical transmission system. It is shown how
             LDBP can be integrated into a coherent receiver DSP chain
             and successfully trained in the presence of various hardware
             impairments. Our results show that LDBP with limited
             complexity can achieve better performance than standard DBP
             by using very short, but jointly optimized, finite-impulse
             response filters in each step. This paper also provides an
             overview of recently proposed extensions of LDBP and we
             comment on potentially interesting avenues for future
             work.},
   Doi = {10.1109/JLT.2020.2994220},
   Key = {fds349917}
}

@article{fds347671,
   Author = {Häger, C and Pfister, HD and Bütler, RM and Liga, G and Alvarado,
             A},
   Title = {Revisiting multi-step nonlinearity compensation with machine
             learning},
   Journal = {Iet Conference Publications},
   Volume = {2019},
   Number = {CP765},
   Year = {2019},
   Month = {January},
   Abstract = {For the efficient compensation of fiber nonlinearity, one of
             the guiding principles appears to be: fewer steps are better
             and more efficient. We challenge this assumption and show
             that carefully designed multi-step approaches can lead to
             better performance-complexity trade-offs than their few-step
             counterparts.},
   Key = {fds347671}
}

@article{fds325508,
   Author = {Sabag, O and Permuter, HH and Pfister, HD},
   Title = {Single-letter bounds on the feedback capacity of unifilar
             finite-state channels},
   Journal = {2016 Ieee International Conference on the Science of
             Electrical Engineering, Icsee 2016},
   Publisher = {IEEE},
   Year = {2017},
   Month = {January},
   ISBN = {9781509021529},
   url = {http://dx.doi.org/10.1109/ICSEE.2016.7806200},
   Abstract = {Upper and lower bounds on the feedback capacity of unifilar
             finite-state channels (FSCs) are derived. The upper bound is
             derived using a new technique, called the Q-contexts, which
             is based on a construction of a directed graph that is used
             to quantize recursively the receiver's output sequences to a
             finite set of contexts. For any choice of Q-graph, the
             feedback capacity is bounded by a single-letter expression,
             Cfb ≤ sup I(X, S; Y | Q), where the supremum is over
             P(x|s,q) and the distribution of (S, Q) is its stationary
             distribution. The bound is tight for all unifilar FSCs where
             feedback capacity is known: channels where the state is a
             function of the outputs, the trapdoor channel, Ising
             channels, the no-consecutive-ones input-constrained erasure
             channel, and the memoryless channel. The upper bound
             indicates that a single-letter expression might exist for
             the capacity of finite-state channels with or without
             feedback which are based on the construction of an auxiliary
             random variable with memory, such as the Q-graph, rather than an
             i.i.d. distribution. The lower bound provides a sufficient
             condition for the optimality of the upper bound; however, it
             is formulated such that independent lower bounds on feedback
             capacity may be calculated. The efficiency of these bounds
             is demonstrated by deriving a new capacity result for the
             dicode erasure channel (DEC). The upper bound also serves as
             a non-trivial bound on the capacity of channels without
             feedback, a problem that is still open.},
   Doi = {10.1109/ICSEE.2016.7806200},
   Key = {fds325508}
}

@article{fds343273,
   Author = {Nguyen, PS and Yedla, A and Pfister, HD and Narayanan,
             KR},
   Title = {Spatially-Coupled Codes and Threshold Saturation on
             Intersymbol-Interference Channels},
   Year = {2011},
   Month = {July},
   Abstract = {Recently, it has been observed that terminated
             low-density parity-check (LDPC) convolutional codes (or
             spatially-coupled codes) appear to approach capacity
             universally across the class of binary memoryless channels.
             This is facilitated by the "threshold saturation" effect
             whereby the belief-propagation (BP) threshold of the
             spatially-coupled ensemble is boosted to the maximum
             a-posteriori (MAP) threshold of the underlying constituent
             ensemble. In this paper, we consider the universality of
             spatially-coupled codes over intersymbol-interference (ISI)
             channels under joint iterative decoding. More specifically,
             we empirically show that threshold saturation also occurs
             for the considered problem. This can be observed by first
             identifying the EXIT curve for erasure noise and the GEXIT
             curve for general noise that naturally obey the general area
             theorem. From these curves, the corresponding MAP and the BP
             thresholds are then numerically obtained. With the fact that
             regular LDPC codes can achieve the symmetric information
             rate (SIR) under MAP decoding, spatially-coupled codes with
             joint iterative decoding can universally approach the SIR of
             ISI channels. For the dicode erasure channel, Kudekar and
             Kasai recently reported very similar results based on
             EXIT-like curves.},
   Key = {fds343273}
}

@article{fds319327,
   Author = {Kumar, S and Vem, A and Narayanan, K and Pfister,
             HD},
   Title = {Spatially-coupled codes for side-information
             problems},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Pages = {516-520},
   Publisher = {IEEE},
   Year = {2014},
   Month = {January},
   ISBN = {9781479951864},
   url = {http://dx.doi.org/10.1109/ISIT.2014.6874886},
   Abstract = {For compound LDGM/LDPC codes with maximum a posteriori (MAP)
             processing, Wainwright and Martinian showed that the
             information-theoretic rate regions of the Wyner-Ziv (WZ) and
             Gelfand-Pinsker (GP) problems are achievable. For the same
             ensemble, these rates do not appear to be achievable with
             message-passing guided decimation (GD). Fortunately,
             spatially-coupled (SC) codes seem to provide an elegant
             remedy when iterative decoding falls short of MAP decoding.
             In particular, Aref et al. recently introduced SC LDGM codes
             that approach the rate-distortion region with
             belief-propagation guided decimation (BPGD). In this paper,
             we show that SC compound LDGM/LDPC codes with BPGD can
             approach the rate regions of the WZ and GP problems. © 2014
             IEEE.},
   Doi = {10.1109/ISIT.2014.6874886},
   Key = {fds319327}
}

@article{fds322716,
   Author = {Kumar, S and Vem, A and Narayanan, K and Pfister,
             HD},
   Title = {Spatially-coupled codes for write-once memories},
   Journal = {2015 53rd Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2015},
   Pages = {125-131},
   Publisher = {IEEE},
   Year = {2016},
   Month = {April},
   ISBN = {9781509018239},
   url = {http://dx.doi.org/10.1109/ALLERTON.2015.7446994},
   Abstract = {The focus of this article is on low-complexity
             capacity-achieving coding schemes for write-once memory
             (WOM) systems. The construction is based on
             spatially-coupled compound LDGM/LDPC codes. Both noiseless
             systems and systems with read errors are considered.
             Compound LDGM/LDPC codes are known to achieve capacity under
             MAP decoding for the closely related Gelfand-Pinsker problem
             and their coset decomposition provides an elegant way to
             encode the messages while simultaneously providing error
             protection. The application of compound codes to the WOM
             system is new. The main result is that spatial coupling
             enables these codes to achieve the capacity region of the
             2-write WOM system with low-complexity message-passing
             encoding and decoding algorithms.},
   Doi = {10.1109/ALLERTON.2015.7446994},
   Key = {fds322716}
}
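
For context, the region that the spatially-coupled scheme targets is the classical capacity region of the noiseless binary 2-write WOM. The statement below is a standard result from the WOM literature in one common parametrization; it is included only for reference and is not derived in the entry above:
\[
  \mathcal{C}_{2\text{-write}}
  = \left\{ (R_1, R_2) :\; 0 \le R_1 \le h(p),\; 0 \le R_2 \le 1 - p,\; 0 \le p \le \tfrac{1}{2} \right\},
\]
where h(\cdot) is the binary entropy function; the maximum sum rate is
\(\max_p \bigl[h(p) + 1 - p\bigr] = \log_2 3 \approx 1.585\) bits per cell over the two writes.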

@article{fds319334,
   Author = {Tunali, NE and Narayanan, KR and Pfister, HD},
   Title = {Spatially-coupled low density lattices based on construction
             a with applications to compute-and-forward},
   Journal = {2013 Ieee Information Theory Workshop, Itw
             2013},
   Publisher = {IEEE},
   Year = {2013},
   Month = {December},
   ISBN = {9781479913237},
   url = {http://dx.doi.org/10.1109/ITW.2013.6691275},
   Abstract = {We consider a class of lattices built using Construction A,
             where the underlying code is a non-binary spatially-coupled
             low density parity check code. We refer to these lattices as
             spatially-coupled LDA (SCLDA) lattices. SCLDA lattices can
             be constructed over integers, Gaussian integers and
             Eisenstein integers. We empirically study the performance of
             SCLDA lattices under belief propagation (BP) decoding.
             Ignoring the rate loss from termination, simulation results
             show that the BP threshold of SCLDA lattices over integers
             is 0.11 dB from the Poltyrev limit (0.34 dB with the rate
             loss) and the BP threshold of SCLDA lattices over Eisenstein
             integers is 0.08 dB from the Poltyrev limit (0.19 dB with the rate
             loss). Motivated by this result, we use SCLDA lattice codes
             over Eisenstein integers for implementing a
             compute-and-forward protocol. For the examples considered in
             this paper, the thresholds for the proposed lattice codes
             are within 0.28 dB from the achievable rate of this coding
             scheme and within 1.06 dB from the achievable computation
             rate of Nazer and Gastpar's coding scheme in [6] extended to
             Eisenstein integers. © 2013 IEEE.},
   Doi = {10.1109/ITW.2013.6691275},
   Key = {fds319334}
}
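
The lattices above are built with Construction A from a non-binary code. A minimal sketch of the basic membership test, using a toy ternary single-parity-check code over the integers for illustration (not the spatially-coupled LDPC code used in the paper):

# Minimal sketch of Construction A over the integers: given a length-n code C
# over Z_q, the lattice is {x in Z^n : x mod q is a codeword of C}.
# The code below is a toy ternary single-parity-check code, chosen only to
# keep the example self-contained.
import numpy as np

q, n = 3, 4

def in_code(c):                      # toy ternary single-parity-check code
    return int(np.sum(c) % q) == 0

def in_lattice(x):
    return in_code(np.mod(x, q))

print(in_lattice(np.array([1, 2, 3, 0])))   # [1,2,0,0] sums to 3 = 0 mod 3 -> True
print(in_lattice(np.array([1, 1, 0, 0])))   # sums to 2 mod 3 != 0 -> False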

@article{fds319333,
   Author = {Obata, N and Jian, YY and Kasai, K and Pfister, HD},
   Title = {Spatially-coupled multi-edge type LDPC codes with bounded
             degrees that achieve capacity on the BEC under BP
             decoding},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Pages = {2433-2437},
   Publisher = {IEEE},
   Year = {2013},
   Month = {December},
   ISBN = {9781479904464},
   url = {http://dx.doi.org/10.1109/ISIT.2013.6620663},
   Abstract = {Convolutional (or spatially-coupled) low-density
             parity-check (LDPC) codes have now been shown to approach
             capacity for a variety of problems. Yet, most of these
             results require sequences of regular LDPC ensembles with
             increasing variable and check degrees. Previously, Kasai and
             Sakaniwa showed empirically that, for the BEC, this
             limitation can be overcome by using spatially-coupled
             MacKay-Neal (MN) and Hsu-Anastasopoulos (HA) ensembles. In
             this paper, we prove this analytically for (k, 2, 2)-MN and
             (2, k, 2)-HA ensembles when k is at least 3. The proof is
             based on the simple approach to threshold saturation,
             introduced by Yedla et al., which relies on potential
             functions. The key step is verifying the non-negativity of a
             potential function associated with the uncoupled system.
             Along the way, we derive the potential function for general
             multi-edge type (MET) LDPC ensembles and establish a duality
             relationship between dual ensembles of MET LDPC codes. ©
             2013 IEEE.},
   Doi = {10.1109/ISIT.2013.6620663},
   Key = {fds319333}
}

@article{fds319407,
   Author = {Askar, NK and Lin, SC and Pfister, HD and Rogerson, GE and Furuno,
             DS},
   Title = {Spectral keying™: A novel modulation scheme for
             UWB systems},
   Journal = {2003 Ieee Conference on Ultra Wideband Systems and
             Technologies, Uwbst 2003 Conference Proceedings},
   Pages = {418-422},
   Publisher = {IEEE},
   Year = {2003},
   Month = {January},
   ISBN = {0780381874},
   url = {http://dx.doi.org/10.1109/UWBST.2003.1267876},
   Abstract = {Spectral keying (SK) is a novel UWB modulation scheme that
             combines properties of both frequency shift keying and pulse
             position modulation. An SK symbol consists of a number of
             pulses where each pulse is from a different frequency band.
             It relies on dividing the available spectrum into multiple
             bands. The information is encoded by the sequence of the
             sent bands. It is found that the SK available symbol set
             scales factorially with the number of frequency bands in
             use. Theoretical analysis and simulations of a five band SK
             system are provided. Measurement results from an
             experimental SK system are also included. © 2003
             IEEE.},
   Doi = {10.1109/UWBST.2003.1267876},
   Key = {fds319407}
}
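
The abstract notes that the SK symbol set scales factorially with the number of frequency bands. A minimal sketch of that count, under the illustrative assumption that each symbol is an ordering (permutation) of the bands with one pulse per band:

# Minimal sketch: if each SK symbol is an ordering of B distinct frequency
# bands, the symbol set has B! elements and carries log2(B!) bits per symbol.
# This assumption is for illustration only, not a description of the exact
# system in the paper.
from math import factorial, log2

for bands in range(2, 7):
    symbols = factorial(bands)
    print(f"{bands} bands: {symbols} symbols, {log2(symbols):.2f} bits/symbol")

For five bands this gives 120 symbols, or about 6.9 bits per symbol.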

@article{fds366158,
   Author = {Coskun, MC and Liva, G and Graell i Amat, A and Lentmaier, M and Pfister, HD},
   Title = {Successive Cancellation Decoding of Single Parity-Check
             Product Codes: Analysis and Improved Decoding},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {69},
   Number = {2},
   Pages = {823-841},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2023},
   Month = {February},
   url = {http://dx.doi.org/10.1109/TIT.2022.3207802},
   Abstract = {A product code with single parity-check component codes can
             be described via the tools of a multi-kernel polar code,
             where the rows of the generator matrix are chosen according
             to the constraints imposed by the product code construction.
             Following this observation, successive cancellation decoding
             of such codes is introduced. In particular, the error
             probability of single parity-check product codes over binary
             memoryless symmetric channels under successive cancellation
             decoding is characterized. A bridge with the analysis of
             product codes introduced by Elias is also established for
             the binary erasure channel. Successive cancellation list
             decoding of single parity-check product codes is then
             described. For the provided example, simulations over the
             binary-input additive white Gaussian noise channel show that
             successive cancellation list decoding outperforms belief
             propagation decoding applied to the code graph. Finally, the
             performance of the concatenation of a product code with a
             high-rate outer code is investigated via distance spectrum
             analysis. Examples of concatenations performing within 0.7
             dB from the random coding union bound are
             provided.},
   Doi = {10.1109/TIT.2022.3207802},
   Key = {fds366158}
}
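
A single parity-check product code places information bits in an array and appends row and column parities. A minimal sketch of the classical encoder for a toy two-dimensional example (the successive cancellation decoder studied in the paper is not reproduced here):

# Minimal sketch: encode a (k x k) message array into a ((k+1) x (k+1))
# single parity-check product codeword by appending a parity bit to every
# row and every column; the corner bit is the overall parity. This is the
# classical Elias-style construction, shown only for context.
import numpy as np

def spc_product_encode(msg):
    k = msg.shape[0]
    cw = np.zeros((k + 1, k + 1), dtype=int)
    cw[:k, :k] = msg
    cw[:k, k] = msg.sum(axis=1) % 2      # row parities
    cw[k, :k] = msg.sum(axis=0) % 2      # column parities
    cw[k, k] = msg.sum() % 2             # overall parity
    return cw

msg = np.array([[1, 0],
                [1, 1]])
print(spc_product_encode(msg))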

@article{fds352261,
   Author = {Coskun, MC and Neu, J and Pfister, HD},
   Title = {Successive Cancellation Inactivation Decoding for Modified
             Reed-Muller and eBCH Codes},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2020-June},
   Pages = {437-442},
   Year = {2020},
   Month = {June},
   url = {http://dx.doi.org/10.1109/ISIT44484.2020.9174226},
   Abstract = {A successive cancellation (SC) decoder with inactivations is
             proposed as an efficient implementation of SC list (SCL)
             decoding over the binary erasure channel. The proposed
             decoder assigns a dummy variable to an information bit
             whenever it is erased during SC decoding and continues with
             decoding. Inactivated bits are resolved using information
             gathered from decoding frozen bits. This decoder leverages
             the structure of the Hadamard matrix, but can be applied to
             any linear code by representing it as a polar code with
             dynamic frozen bits. SCL decoders are partially
             characterized using density evolution to compute the average
             number of inactivations required to achieve the maximum
             a-posteriori decoding performance. The proposed measure
             quantifies the performance vs. complexity trade-off and
             provides new insight into the dynamics of the number of paths in
             SCL decoding. The technique is applied to analyze
             Reed-Muller (RM) codes with dynamic frozen bits. It is shown
             that these modified RM codes perform close to extended BCH
             codes.},
   Doi = {10.1109/ISIT44484.2020.9174226},
   Key = {fds352261}
}

@article{fds319315,
   Author = {Pfister, HD and Emmadi, SK and Narayanan, K},
   Title = {Symmetric product codes},
   Journal = {2015 Information Theory and Applications Workshop, Ita 2015
             Conference Proceedings},
   Pages = {282-290},
   Publisher = {IEEE},
   Year = {2015},
   Month = {October},
   ISBN = {9781479971954},
   url = {http://dx.doi.org/10.1109/ITA.2015.7309002},
   Abstract = {Product codes were introduced by Elias in 1954 and
             generalized by Tanner in 1981. Recently, a number of
             generalized product codes have been proposed for forward
             error-correction in high-speed optical communication. In
             practice, these codes are decoded by iteratively decoding
             each of the component codes. Symmetric product codes are a
             subclass of generalized product codes that use symmetry to
             reduce the block length of a product code while using the
             same component code. One example of this subclass, dubbed
             half-product codes, was introduced by Tanner in 1981 and
             then generalized by Justesen in 2011. In this paper, we
             discuss some initial results on symmetric product codes. Our
             results show that: (i) these codes have a larger normalized
             minimum distance than the product code from which they are
             derived, (ii) some small constructions achieve the largest
             minimum distance possible for a linear code, and (iii) they
             can have better performance in both the waterfall region and
             the error floor when compared to a product code of similar
             length and rate.},
   Doi = {10.1109/ITA.2015.7309002},
   Key = {fds319315}
}

@article{fds336004,
   Author = {Rengaswamy, N and Calderbank, R and Pfister, HD and Kadhe,
             S},
   Title = {Synthesis of Logical Clifford Operators via Symplectic
             Geometry},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2018-June},
   Pages = {791-795},
   Publisher = {IEEE},
   Year = {2018},
   Month = {August},
   url = {http://dx.doi.org/10.1109/ISIT.2018.8437652},
   Abstract = {Quantum error-correcting codes can be used to protect qubits
             involved in quantum computation. This requires that logical
             operators acting on protected qubits be translated to
             physical operators (circuits) acting on physical quantum
             states. We propose a mathematical framework for synthesizing
             physical circuits that implement logical Clifford operators
             for stabilizer codes. Circuit synthesis is enabled by
             representing the desired physical Clifford operator in
             \mathbb{C}^{N\times N} as a 2m\times 2m binary symplectic
             matrix, where N=2^{m}. We show that for an [[m, m-k]]
             stabilizer code every logical Clifford operator has
             2^{k(k+1)/2} symplectic solutions, and
             we enumerate them efficiently using symplectic
             transvections. The desired circuits are then obtained by
             writing each of the solutions as a product of elementary
             symplectic matrices. For a given operator, our assembly of
             all of its physical realizations enables optimization over
             them with respect to a suitable metric. Our method of
             circuit synthesis can be applied to any stabilizer code, and
             this paper provides a proof of concept synthesis of
             universal Clifford gates for the well-known
             [[6,4,2]] code. Programs
             implementing our algorithms can be found at
             https://github.com/nrenga/symplectic-arxiv18a.},
   Doi = {10.1109/ISIT.2018.8437652},
   Key = {fds336004}
}
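
The synthesis above represents a Clifford operator by a 2m x 2m binary symplectic matrix F, i.e., F Omega F^T = Omega (mod 2) with Omega = [[0, I], [I, 0]]. A minimal sketch of that symplectic check, using an illustrative single-qubit example rather than the paper's synthesis algorithm:

# Minimal sketch: verify that a 2m x 2m binary matrix F is symplectic,
# i.e., F @ Omega @ F.T = Omega (mod 2) with Omega = [[0, I], [I, 0]].
# The example matrix swaps the X and Z parts, as the Hadamard gate does;
# it is illustrative only.
import numpy as np

def is_symplectic(F, m):
    I = np.eye(m, dtype=int)
    Z = np.zeros((m, m), dtype=int)
    Omega = np.block([[Z, I], [I, Z]])
    return np.array_equal((F @ Omega @ F.T) % 2, Omega)

m = 1
F_hadamard = np.array([[0, 1],
                       [1, 0]])      # swaps the X and Z parts
print(is_symplectic(F_hadamard, m))  # True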

@article{fds343274,
   Author = {Pfister, HD},
   Title = {The capacity of finite-state channels in the high-noise
             regime},
   Publisher = {Cambridge University Press},
   Year = {2011},
   Month = {May},
   url = {http://dx.doi.org/10.1017/cbo9780511819407.007},
   Doi = {10.1017/cbo9780511819407.007},
   Key = {fds343274}
}

@article{fds319366,
   Author = {Kudekar, S and Pfister, HD},
   Title = {The effect of spatial coupling on compressive
             sensing},
   Journal = {2010 48th Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2010},
   Pages = {347-353},
   Publisher = {IEEE},
   Year = {2010},
   Month = {December},
   ISBN = {9781424482146},
   url = {http://dx.doi.org/10.1109/ALLERTON.2010.5706927},
   Abstract = {Recently, it was observed that spatially-coupled LDPC code
             ensembles approach the Shannon capacity for a class of
             binary-input memoryless symmetric (BMS) channels. The
             fundamental reason for this was attributed to a threshold
             saturation phenomenon derived in [1]. In particular, it was
             shown that the belief propagation (BP) threshold of the
             spatially coupled codes is equal to the maximum a posteriori
             (MAP) decoding threshold of the underlying constituent
             codes. In this sense, the BP threshold is saturated to its
             maximum value. Moreover, it has been empirically observed
             that the same phenomenon also occurs when transmitting over
             more general classes of BMS channels. In this paper, we show
             that the effect of spatial coupling is not restricted to the
             realm of channel coding. The effect of coupling also
             manifests itself in compressed sensing. Specifically, we
             show that spatially-coupled measurement matrices have an
             improved sparsity to sampling threshold for reconstruction
             algorithms based on verification decoding. For BP-based
             reconstruction algorithms, this phenomenon is also tested
             empirically via simulation. At the block lengths accessible
             via simulation, the effect is rather small but, based on the
             threshold analysis, we believe this warrants further study.
             ©2010 IEEE.},
   Doi = {10.1109/ALLERTON.2010.5706927},
   Key = {fds319366}
}
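
The entry above reports that spatially-coupled measurement matrices improve the sparsity-to-sampling threshold. A minimal sketch of one common band-diagonal coupled construction; the dense Gaussian blocks and all parameter values here are illustrative placeholders and only show the coupled band structure, not the sparse matrices analyzed with verification decoding in the paper:

# Minimal sketch of a band-diagonal ("spatially-coupled") measurement matrix:
# the signal is split into L blocks and each measurement block only touches
# w adjacent signal blocks. Parameters are arbitrary placeholders.
import numpy as np

def coupled_measurement_matrix(L=8, w=3, rows_per_block=20, cols_per_block=50,
                               rng=np.random.default_rng(0)):
    A = np.zeros((L * rows_per_block, L * cols_per_block))
    for r in range(L):
        for c in range(L):
            if 0 <= r - c < w:   # measurement block r sees w adjacent signal blocks
                A[r*rows_per_block:(r+1)*rows_per_block,
                  c*cols_per_block:(c+1)*cols_per_block] = \
                    rng.standard_normal((rows_per_block, cols_per_block)) / np.sqrt(w * rows_per_block)
    return A

A = coupled_measurement_matrix()
print(A.shape, np.count_nonzero(A) / A.size)   # banded: most blocks are zero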

@article{fds322714,
   Author = {Reeves, G and Pfister, HD},
   Title = {The replica-symmetric prediction for compressed sensing with
             Gaussian matrices is exact},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2016-August},
   Pages = {665-669},
   Publisher = {IEEE},
   Year = {2016},
   Month = {August},
   ISBN = {9781509018062},
   url = {http://dx.doi.org/10.1109/ISIT.2016.7541382},
   Abstract = {This paper considers the fundamental limit of compressed
             sensing for i.i.d. signal distributions and i.i.d. Gaussian
             measurement matrices. Its main contribution is a rigorous
             characterization of the asymptotic mutual information (MI)
             and minimum mean-square error (MMSE) in this setting. Under
             mild technical conditions, our results show that the
             limiting MI and MMSE are equal to the values predicted by
             the replica method from statistical physics. This resolves a
             well-known problem that has remained open for over a
             decade.},
   Doi = {10.1109/ISIT.2016.7541382},
   Key = {fds322714}
}

@article{fds342383,
   Author = {Reeves, G and Pfister, HD},
   Title = {The Replica-Symmetric Prediction for Random Linear
             Estimation With Gaussian Matrices Is Exact},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {65},
   Number = {4},
   Pages = {2252-2283},
   Year = {2019},
   Month = {April},
   url = {http://dx.doi.org/10.1109/TIT.2019.2891664},
   Abstract = {This paper considers the fundamental limit of random linear
             estimation for i.i.d. signal distributions and i.i.d.
             Gaussian measurement matrices. Its main contribution is a
             rigorous characterization of the asymptotic mutual
             information (MI) and minimum mean-square error (MMSE) in
             this setting. Under mild technical conditions, our results
             show that the limiting MI and MMSE are equal to the values
             predicted by the replica method from statistical physics.
             This resolves a well-known problem that has remained open
             for over a decade.},
   Doi = {10.1109/TIT.2019.2891664},
   Key = {fds342383}
}

@article{fds319406,
   Author = {Pfister, HD and Siegel, PH},
   Title = {The serial concatenation of rate-1 codes through uniform
             random interleavers},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {49},
   Number = {6},
   Pages = {1425-1438},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2003},
   Month = {June},
   url = {http://dx.doi.org/10.1109/TIT.2003.811907},
   Abstract = {Until the analysis of Repeat Accumulate codes by Divsalar et
             al., few people would have guessed that simple rate-1 codes
             could play a crucial role in the construction of "good"
             binary codes. In this paper, we will construct "good" binary
             linear block codes at any rate r < 1 by serially
             concatenating an arbitrary outer code of rate r with a large
             number of rate-1 inner codes through uniform random
             interleavers. We derive the average output weight enumerator
             (WE) for this ensemble in the limit as the number of inner
             codes goes to infinity. Using a probabilistic upper bound on
             the minimum distance, we prove that long codes from this
             ensemble will achieve the Gilbert-Varshamov bound with high
             probability. Numerical evaluation of the minimum distance
             shows that the asymptotic bound can be achieved with a small
             number of inner codes. In essence, this construction
             produces codes with good distance properties which are also
             compatible with iterative "turbo" style decoding. For
             selected codes, we also present bounds on the probability of
             maximum-likelihood decoding (MLD) error and simulation
             results for the probability of iterative decoding
             error.},
   Doi = {10.1109/TIT.2003.811907},
   Key = {fds319406}
}
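
The construction above passes the outer codeword through many rate-1 inner codes via uniform random interleavers. A minimal sketch of one such stage, using the rate-1 accumulator 1/(1+D) as the inner code; the accumulator is just one convenient choice for illustration, since the paper allows arbitrary rate-1 inner codes:

# Minimal sketch of one stage of the construction: permute the bits with a
# uniformly random interleaver, then pass them through a rate-1 accumulator
# (running XOR, i.e., the 1/(1+D) convolutional code). The ensemble in the
# paper repeats such stages many times.
import random

def accumulate(bits):
    out, state = [], 0
    for b in bits:
        state ^= b
        out.append(state)
    return out

def one_stage(bits, rng=random):
    perm = list(range(len(bits)))
    rng.shuffle(perm)                    # uniform random interleaver
    return accumulate([bits[i] for i in perm])

word = [1, 0, 1, 1, 0, 0, 1, 0]          # output of some outer code
for _ in range(4):                        # several rate-1 stages in series
    word = one_stage(word)
print(word)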

@article{fds319323,
   Author = {Kumar, S and Young, AJ and Macris, N and Pfister,
             HD},
   Title = {Threshold saturation for spatially coupled LDPC and LDGM
             codes on BMS channels},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {60},
   Number = {12},
   Pages = {7389-7415},
   Year = {2014},
   Month = {December},
   url = {http://dx.doi.org/10.1109/TIT.2014.2360692},
   Abstract = {Spatially-coupled low-density parity-check (LDPC) codes,
             which were first introduced as LDPC convolutional codes,
             have been shown to exhibit excellent performance under
             low-complexity belief-propagation decoding. This phenomenon
             is now termed threshold saturation via spatial coupling.
             Spatially-coupled codes have been successfully applied in
             numerous areas. In particular, it was proven that
             spatially-coupled regular LDPC codes universally achieve
             capacity over the class of binary memoryless symmetric (BMS)
             channels under belief-propagation decoding. Recently,
             potential functions have been used to simplify threshold
             saturation proofs for scalar and vector recursions. In this
             paper, potential functions are used to prove threshold
             saturation for irregular LDPC and low-density
             generator-matrix codes on BMS channels, extending the
             simplified proof technique to BMS channels. The
             corresponding potential functions are closely related to the
             average Bethe free entropy of the ensembles in the
             large-system limit. These functions also appear in
             statistical physics when the replica method is used to
             analyze optimal decoding.},
   Doi = {10.1109/TIT.2014.2360692},
   Key = {fds319323}
}

@article{fds319346,
   Author = {Nguyen, PS and Yedla, A and Pfister, HD and Narayanan,
             KR},
   Title = {Threshold saturation of spatially-coupled codes on
             intersymbol-interference channels},
   Journal = {Ieee International Conference on Communications},
   Pages = {2181-2186},
   Publisher = {IEEE},
   Year = {2012},
   Month = {December},
   ISBN = {9781457720529},
   url = {http://dx.doi.org/10.1109/ICC.2012.6364433},
   Abstract = {Recently, it has been observed that terminated
             low-density-parity-check (LDPC) convolutional codes (or
             spatially-coupled codes) appear to approach the capacity
             universally across the class of binary memoryless channels.
             This is facilitated by the 'threshold saturation' effect
             whereby the belief-propagation (BP) threshold of the
             spatially-coupled ensemble is boosted to the maximum
             a-posteriori (MAP) threshold of the underlying constituent
             ensemble. In this paper, we consider spatially-coupled codes
             over intersymbol-interference (ISI) channels under joint
             iterative decoding where we empirically show that threshold
             saturation also occurs. This can be observed by first
             identifying the GEXIT curve that naturally obeys the general
             area theorem. From this curve, the corresponding MAP and the
             BP threshold estimates are then numerically obtained. Given
             the fact that regular LDPC codes can achieve the symmetric
             information rate (SIR) under MAP decoding, we conjecture
             that spatially-coupled codes with joint iterative decoding
             can universally approach the SIR of ISI channels. © 2012
             IEEE.},
   Doi = {10.1109/ICC.2012.6364433},
   Key = {fds319346}
}

@article{fds358372,
   Author = {Srinivasavaradhan, SR and Gopi, S and Pfister, HD and Yekhanin,
             S},
   Title = {Trellis BMA: Coded Trace Reconstruction on IDS Channels for
             DNA Storage},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Volume = {2021-July},
   Pages = {2453-2458},
   Year = {2021},
   Month = {July},
   url = {http://dx.doi.org/10.1109/ISIT45174.2021.9517821},
   Abstract = {Sequencing a DNA strand, as part of the read process in DNA
             storage, produces multiple noisy copies which can be
             combined to produce better estimates of the original strand;
             this is called trace reconstruction. One can reduce the
             error rate further by introducing redundancy in the write
             sequence, and this is called coded trace reconstruction. In
             this paper, we model the DNA storage channel as an
             insertion-deletion-substitution (IDS) channel and design
             both encoding schemes and low-complexity decoding algorithms
             for coded trace reconstruction. We introduce Trellis BMA, a
             new reconstruction algorithm whose complexity is linear in
             the number of traces, and compare its performance to
             previous algorithms. Our results show that it reduces the
             error rate on both simulated and experimental data. The
             performance comparisons in this paper are based on the
             Clustered Nanopore Reads Dataset publicly released with this
             paper. Our hope is that this dataset will enable research
             progress by allowing objective comparisons between candidate
             algorithms.},
   Doi = {10.1109/ISIT45174.2021.9517821},
   Key = {fds358372}
}
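
The paper models the DNA storage read process as an insertion-deletion-substitution (IDS) channel that produces multiple noisy traces of each strand. A minimal sketch of such a trace generator; the probabilities below are arbitrary placeholders and this is not the noise model of the released dataset:

# Minimal sketch of an i.i.d. insertion-deletion-substitution (IDS) channel
# over the DNA alphabet: each input symbol may be preceded by a random
# insertion, deleted, or substituted. Probabilities are placeholders.
import random

ALPHABET = "ACGT"

def ids_channel(strand, p_ins=0.02, p_del=0.02, p_sub=0.02, rng=random):
    out = []
    for base in strand:
        if rng.random() < p_ins:                 # insertion before this base
            out.append(rng.choice(ALPHABET))
        if rng.random() < p_del:                 # deletion of this base
            continue
        if rng.random() < p_sub:                 # substitution of this base
            out.append(rng.choice([b for b in ALPHABET if b != base]))
        else:
            out.append(base)
    return "".join(out)

strand = "ACGTACGTAC"
traces = [ids_channel(strand) for _ in range(5)]  # multiple noisy reads
print(traces)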

@article{fds355171,
   Author = {Reeves, G and Pfister, H},
   Title = {Understanding Phase Transitions via Mutual Information and
             MMSE},
   Volume = {abs/1907.02095},
   Year = {2019},
   Month = {July},
   Abstract = {The ability to understand and solve high-dimensional
             inference problems is essential for modern data science.
             This article examines high-dimensional inference problems
             through the lens of information theory and focuses on the
             standard linear model as a canonical example that is both
             rich enough to be practically useful and simple enough to be
             studied rigorously. In particular, this model can exhibit
             phase transitions where an arbitrarily small change in the
             model parameters can induce large changes in the quality of
             estimates. For this model, the performance of optimal
             inference can be studied using the replica method from
             statistical physics but, until recently, it was not known if
             the resulting formulas were actually correct. In this
             chapter, we present a tutorial description of the standard
             linear model and its connection to information theory. We
             also describe the replica prediction for this model and
             outline the authors' recent proof that it is
             exact.},
   Key = {fds355171}
}
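
For reference, a minimal statement of the standard linear model and the two quantities the chapter tracks, in notation assumed here for illustration (the chapter's own normalization may differ):
\[
  Y = A X + W, \qquad W \sim \mathcal{N}(0, I),
\]
with A an i.i.d. Gaussian measurement matrix and X drawn i.i.d. from a known prior; the objects of interest are the per-coordinate mutual information \(\tfrac{1}{n} I(X;Y)\) and the minimum mean-square error
\[
  \mathrm{mmse} = \tfrac{1}{n}\, \mathbb{E}\!\left[ \lVert X - \mathbb{E}[X \mid Y] \rVert^{2} \right].
\]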

@article{fds345739,
   Author = {Rengaswamy, N and Calderbank, R and Pfister, HD},
   Title = {Unifying the Clifford hierarchy via symmetric matrices over
             rings},
   Journal = {Physical Review A},
   Volume = {100},
   Number = {2},
   Year = {2019},
   Month = {August},
   url = {http://dx.doi.org/10.1103/PhysRevA.100.022304},
   Abstract = {The Clifford hierarchy of unitary operators is a
             foundational concept for universal quantum computation. It
             was introduced to show that universal quantum computation
             can be realized via quantum teleportation, given access to
             certain standard resources. While the full structure of the
             hierarchy is still not understood, Cui et al. [S. X. Cui et
             al., Phys. Rev. A 95, 012329 (2017)]
             recently described the structure of diagonal unitaries in
             the hierarchy. They considered diagonal unitaries whose
             action on a computational basis qudit state is described by
             a 2^k-th root of unity raised to some polynomial function of
             the state, and they established the level of such unitaries
             in the hierarchy as a function of k and the degree of the
             polynomial. For qubit systems, we consider k-th level
             diagonal unitaries that can be described just by quadratic
             forms of the state over the ring Z_{2^k} of integers modulo
             2^k. The quadratic forms involve symmetric matrices over
             Z_{2^k} that
             can be used to efficiently describe all two-local and
             certain higher locality diagonal gates in the hierarchy. We
             also provide explicit algebraic descriptions of their action
             on Pauli matrices, which establishes a natural recursion to
             diagonal unitaries from lower levels. The result involves
             symplectic matrices over Z_{2^k} and hence our perspective
             unifies a subgroup of diagonal gates in the Clifford
             hierarchy with the binary symplectic framework for gates in
             the Clifford group. We augment our description with simple
             examples for certain standard gates. In addition to
             demonstrating structure, these formulas might prove useful
             in applications such as (i) classical simulation of quantum
             circuits, especially via the stabilizer rank approach, (ii)
             synthesis of logical non-Clifford unitaries, specifically
             alternatives to expensive magic state distillation, and
             (iii) decomposition of arbitrary unitaries beyond the
             Clifford+T set of gates, perhaps leading to shorter depth
             circuits. Our results suggest that some nondiagonal gates in
             the hierarchy might also be understood by generalizing other
             binary symplectic matrices to integer rings.},
   Doi = {10.1103/PhysRevA.100.022304},
   Key = {fds345739}
}
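
The abstract describes diagonal gates determined by quadratic forms of the computational-basis state over Z_{2^k}. A minimal sketch of that idea under a simplified convention chosen here for illustration (an upper-triangular representative of the form and the phase exp(2*pi*i*q/2^k); the paper's symmetric-matrix normalization may differ), which recovers the CZ gate from the form v1*v2 with k = 1:

# Minimal sketch: build a diagonal unitary whose phase on basis state v is
# exp(2*pi*i * (v^T R v) / 2^k). The convention and the matrix R below are
# illustrative assumptions, not the paper's exact parametrization.
import numpy as np
from itertools import product

def diag_from_quadratic_form(R, k):
    m = R.shape[0]
    phases = []
    for v in product([0, 1], repeat=m):
        v = np.array(v)
        q = int(v @ R @ v) % (2 ** k)
        phases.append(np.exp(2j * np.pi * q / 2 ** k))
    return np.diag(phases)

k = 1
R = np.array([[0, 1],
              [0, 0]])     # form v1*v2, encoded by an upper-triangular representative
print(np.round(diag_from_quadratic_form(R, k)).real)  # diag(1, 1, 1, -1) = CZ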

@article{fds319355,
   Author = {Yedla, A and Nguyen, PS and Pfister, HD and Narayanan,
             KR},
   Title = {Universal codes for the Gaussian MAC via spatial
             coupling},
   Journal = {2011 49th Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2011},
   Pages = {1801-1808},
   Publisher = {IEEE},
   Year = {2011},
   Month = {December},
   ISBN = {9781457718168},
   url = {http://dx.doi.org/10.1109/Allerton.2011.6120387},
   Abstract = {We consider transmission of two independent and separately
             encoded sources over a two-user binary-input Gaussian
             multiple-access channel. The channel gains are assumed to be
             unknown at the transmitter and the goal is to design an
             encoder-decoder pair that achieves reliable communication
             for all channel gains where this is theoretically possible.
             We call such a system universal with respect to the channel
             gains. Kudekar et al. recently showed that terminated
             low-density parity-check convolutional codes (a.k.a.
             spatially-coupled low-density parity-check ensembles) have
             belief-propagation thresholds that approach their maximum
             a-posteriori thresholds. This was proven for binary erasure
             channels and shown empirically for binary memoryless
             symmetric channels. It was conjectured that the principle of
             spatial coupling is very general and the phenomenon of
             threshold saturation applies to a very broad class of
             graphical models. In this work, we derive an area theorem
             for the joint decoder and empirically show that threshold
             saturation occurs for this problem. As a result, we
             demonstrate near-universal performance for this problem
             using the proposed spatially-coupled coding system. © 2011
             IEEE.},
   Doi = {10.1109/Allerton.2011.6120387},
   Key = {fds319355}
}

@article{fds319365,
   Author = {Yedla, A and Nguyen, PS and Pfister, HD and Narayanan,
             KR},
   Title = {Universal codes for the Gaussian MAC via spatial
             coupling.},
   Journal = {Allerton},
   Pages = {1801-1808},
   Publisher = {IEEE},
   Year = {2011},
   ISBN = {978-1-4577-1817-5},
   url = {http://dx.doi.org/10.1109/Allerton.2011.6120387},
   Doi = {10.1109/Allerton.2011.6120387},
   Key = {fds319365}
}

@article{fds319358,
   Author = {Yedla, A and Pfister, HD and Narayanan, KR},
   Title = {Universality for the noisy Slepian-Wolf problem via spatial
             coupling},
   Journal = {Ieee International Symposium on Information Theory
             Proceedings},
   Pages = {2567-2571},
   Publisher = {IEEE},
   Year = {2011},
   Month = {October},
   ISBN = {9781457705953},
   url = {http://dx.doi.org/10.1109/ISIT.2011.6034032},
   Abstract = {We consider a noisy Slepian-Wolf problem where two
             correlated sources are separately encoded and transmitted
             over two independent binary memoryless symmetric channels.
             Each channel capacity is assumed to be characterized by a
             single parameter which is not known at the transmitter. The
             receiver has knowledge of both the source correlation and
             the channel parameters. We call a system universal if it
             retains near-capacity performance without channel knowledge
             at the transmitter. Kudekar et al. recently showed that
             terminated low-density parity-check (LDPC) convolutional
             codes (a.k.a. spatially-coupled LDPC ensembles) can have
             belief-propagation thresholds that approach their maximum
             a-posteriori thresholds. This was proven for binary erasure
             channels and shown empirically for binary memoryless
             symmetric channels. They also conjectured that the principle
             of spatial coupling is very general and the phenomenon of
             threshold saturation applies to a very broad class of
             graphical models. In this work, we derive an area theorem
             for the joint decoder and empirically show that threshold
             saturation occurs for this problem. As a result, we
             demonstrate near-universal performance for this problem
             using the proposed spatially-coupled coding system. A
             similar result is also discussed briefly for the 2-user
             multiple-access channel. © 2011 IEEE.},
   Doi = {10.1109/ISIT.2011.6034032},
   Key = {fds319358}
}

@article{fds319364,
   Author = {Yedla, A and Pfister, HD and Narayanan, KR},
   Title = {Universality for the noisy Slepian-Wolf problem via spatial
             coupling.},
   Journal = {Isit},
   Pages = {2567-2571},
   Publisher = {IEEE},
   Editor = {Kuleshov, A and Blinovsky, VM and Ephremides, A},
   Year = {2011},
   ISBN = {978-1-4577-0596-0},
   url = {http://dx.doi.org/10.1109/ISIT.2011.6034032},
   Doi = {10.1109/ISIT.2011.6034032},
   Key = {fds319364}
}

@article{fds319385,
   Author = {Wang, CW and Pfister, HD},
   Title = {Upper bounds on the MAP threshold of iterative decoding
             systems with erasure noise},
   Journal = {2008 5th International Symposium on Turbo Codes and Related
             Topics, Turbocoding},
   Pages = {7-12},
   Publisher = {IEEE},
   Year = {2008},
   Month = {December},
   ISBN = {9781424428632},
   url = {http://dx.doi.org/10.1109/TURBOCODING.2008.4658664},
   Abstract = {Following the work of Méasson, Montanari, and Urbanke, this
             paper considers the maximum a posteriori (MAP) decoding
             thresholds of three iterative decoding systems. First,
             irregular repeat-accumulate (IRA) and accumulate-repeat-accumulate
             (ARA) code ensembles are analyzed on the binary erasure
             channel (BEC). Next, the joint iterative decoding of LDPC
             codes is studied on the dicode erasure channel (DEC). The
             DEC is a two-state intersymbol-interference (ISI) channel
             with erasure noise and is the simplest example of such a
             channel. The MAP threshold bound for the
             joint decoder is based on a slight generalization of the
             EXIT area theorem. © 2008 IEEE.},
   Doi = {10.1109/TURBOCODING.2008.4658664},
   Key = {fds319385}
}
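
For reference, the dicode erasure channel used above is commonly defined as the 1 - D partial-response channel followed by an erasure channel; a minimal statement in assumed notation:
\[
  y_k =
  \begin{cases}
    x_k - x_{k-1} & \text{with probability } 1 - \epsilon,\\
    ?\ (\text{erasure}) & \text{with probability } \epsilon,
  \end{cases}
\]
where the erasures are i.i.d. across time.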

@article{fds319351,
   Author = {Zhang, F and Pfister, HD},
   Title = {Verification decoding of high-rate ldpc codes with
             applications in compressed sensing},
   Journal = {Ieee Transactions on Information Theory},
   Volume = {58},
   Number = {8},
   Pages = {5042-5058},
   Year = {2012},
   Month = {July},
   url = {http://dx.doi.org/10.1109/TIT.2012.2201344},
   Abstract = {This paper considers the performance of (j,k)-regular
             low-density parity-check (LDPC) codes with message-passing
             (MP) decoding algorithms in the high-rate regime. In
             particular, we derive the high-rate scaling law for MP
             decoding of LDPC codes on the binary erasure channel (BEC)
             and the q-ary symmetric channel (q-SC). For the BEC and a
             fixed j, the density evolution (DE) threshold of iterative
             decoding scales like \Theta (k^{-1}) and the critical
             stopping ratio scales like \Theta (k^{-j/(j-2)}). For the
             q-SC and a fixed j, the DE threshold of verification
             decoding depends on the details of the decoder and scales
             like \Theta (k^{-1}) for one decoder. Using the fact that
             coding over large finite alphabets is very similar to coding
             over the real numbers, the analysis of verification decoding
             is also extended to the compressed sensing (CS) of strictly
             sparse signals. A DE-based approach is used to analyze the
             CS systems with randomized-reconstruction guarantees. This
             leads to the result that strictly sparse signals can be
             reconstructed efficiently with high probability using a
             constant oversampling ratio (i.e., when the number of
             measurements scales linearly with the sparsity of the
             signal). A stopping-set-based approach is also used to get
             stronger (e.g., uniform-in-probability) reconstruction
             guarantees. © 1963-2012 IEEE.},
   Doi = {10.1109/TIT.2012.2201344},
   Key = {fds319351}
}

@article{fds342176,
   Author = {Lian, M and Häger, C and Pfister, HD},
   Title = {What can machine learning teach us about
             communications?},
   Journal = {2018 Ieee Information Theory Workshop, Itw
             2018},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1109/ITW.2018.8613331},
   Abstract = {Rapid improvements in machine learning over the past decade
             are beginning to have far-reaching effects. For
             communications, engineers with limited domain expertise can
             now use off-the-shelf learning packages to design
             high-performance systems based on simulations. Prior to the
             current revolution in machine learning, the majority of
             communication engineers were quite aware that system
             parameters (such as filter coefficients) could be learned
             using stochastic gradient descent. It was not at all clear,
             however, that more complicated parts of the system
             architecture could be learned as well. In this paper, we
             discuss the application of machine-learning techniques to
             two communications problems and focus on what can be learned
             from the resulting systems. We were pleasantly surprised
             that the observed gains in one example have a simple
             explanation that only became clear in hindsight. In essence,
             deep learning discovered a simple and effective strategy
             that had not been considered earlier.},
   Doi = {10.1109/ITW.2018.8613331},
   Key = {fds342176}
}

@article{fds342385,
   Author = {Häger, C and Pfister, HD},
   Title = {Wideband Time-Domain Digital Backpropagation via Subband
             Processing and Deep Learning},
   Journal = {European Conference on Optical Communication,
             Ecoc},
   Volume = {2018-September},
   Year = {2018},
   Month = {November},
   url = {http://dx.doi.org/10.1109/ECOC.2018.8535251},
   Abstract = {We propose a low-complexity sub-banded DSP architecture for
             digital backpropagation where the walk-off effect is
             compensated using simple delay elements. For a simulated
             96-Gbaud signal and 2500 km optical link, our method
             achieves a 2.8 dB SNR improvement over linear
             equalization.},
   Doi = {10.1109/ECOC.2018.8535251},
   Key = {fds342385}
}

 
