Publications of Nicolas Brunel — chronological combined listing

%% Papers Published   
@article{fds328483,
   author = {Graupner, M and Brunel, N},
   title = {A bistable synaptic model with transitions between states
             induced by calcium dynamics: theory vs experiment},
   journal = {BMC Neuroscience},
   volume = {10},
   number = {Suppl 1},
   pages = {O15},
   year = {2009},
   url = {http://dx.doi.org/10.1186/1471-2202-10-S1-O15},
   doi = {10.1186/1471-2202-10-S1-O15},
   key = {fds328483}
}

@article{fds328461,
   author = {Clopath, C and Badura, A and De Zeeuw, CI and Brunel, N},
   title = {A cerebellar learning model of vestibulo-ocular reflex
             adaptation in wild-type and mutant mice.},
   journal = {The Journal of neuroscience : the official journal of the
             Society for Neuroscience},
   volume = {34},
   number = {21},
   pages = {7203--7215},
   year = {2014},
   month = may,
   url = {http://dx.doi.org/10.1523/jneurosci.2791-13.2014},
   abstract = {Mechanisms of cerebellar motor learning are still poorly
             understood. The standard Marr-Albus-Ito theory posits that
             learning involves plasticity at the parallel fiber to
             Purkinje cell synapses under control of the climbing fiber
             input, which provides an error signal as in classical
             supervised learning paradigms. However, a growing body of
             evidence challenges this theory, in that additional sites of
             plasticity appear to contribute to motor adaptation. Here,
             we consider phase-reversal training of the vestibulo-ocular
             reflex (VOR), a simple form of motor learning for which a
             large body of experimental data is available in wild-type
             and mutant mice, in which the excitability of granule cells
             or inhibition of Purkinje cells was affected in a
             cell-specific fashion. We present novel electrophysiological
             recordings of Purkinje cell activity measured in naive
             wild-type mice subjected to this VOR adaptation task. We
             then introduce a minimal model that consists of learning at
             the parallel fibers to Purkinje cells with the help of the
             climbing fibers. Although the minimal model reproduces the
             behavior of the wild-type animals and is analytically
             tractable, it fails at reproducing the behavior of mutant
             mice and the electrophysiology data. Therefore, we build a
             detailed model involving plasticity at the parallel fibers
             to Purkinje cells' synapse guided by climbing fibers,
             feedforward inhibition of Purkinje cells, and plasticity at
             the mossy fiber to vestibular nuclei neuron synapse. The
             detailed model reproduces both the behavioral and
             electrophysiological data of both the wild-type and mutant
             mice and allows for experimentally testable
             predictions.},
   doi = {10.1523/jneurosci.2791-13.2014},
   key = {fds328461}
}

@article{fds328503,
   author = {Boucheny, C and Brunel, N and Arleo, A},
   title = {A continuous attractor network model without recurrent
             excitation: maintenance and integration in the head
             direction cell system.},
   journal = {Journal of Computational Neuroscience},
   volume = {18},
   number = {2},
   pages = {205--227},
   year = {2005},
   month = mar,
   url = {http://dx.doi.org/10.1007/s10827-005-6559-y},
   abstract = {Motivated by experimental observations of the head direction
             system, we study a three population network model that
             operates as a continuous attractor network. This network is
             able to store in a short-term memory an angular variable
             (the head direction) as a spatial profile of activity across
             neurons in the absence of selective external inputs, and to
             accurately update this variable on the basis of angular
             velocity inputs. The network is composed of one excitatory
             population and two inhibitory populations, with
             inter-connections between populations but no connections
             within the neurons of a same population. In particular,
             there are no excitatory-to-excitatory connections. Angular
             velocity signals are represented as inputs in one inhibitory
             population (clockwise turns) or the other (counterclockwise
             turns). The system is studied using a combination of
             analytical and numerical methods. Analysis of a simplified
             model composed of threshold-linear neurons gives the
             conditions on the connectivity for (i) the emergence of the
             spatially selective profile, (ii) reliable integration of
             angular velocity inputs, and (iii) the range of angular
             velocities that can be accurately integrated by the model.
             Numerical simulations allow us to study the proposed
             scenario in a large network of spiking neurons and compare
             their dynamics with that of head direction cells recorded in
             the rat limbic system. In particular, we show that the
             directional representation encoded by the attractor network
             can be rapidly updated by external cues, consistent with the
             very short update latencies observed experimentally by
             Zugaro et al. (2003) in thalamic head direction
             cells.},
   doi = {10.1007/s10827-005-6559-y},
   key = {fds328503}
}

@article{fds328537,
   author = {Brunel, Nicolas and Zecchina, Riccardo},
   title = {A simple geometrical bound for replica symmetry stability in
             neural networks models},
   journal = {Modern Physics Letters B},
   volume = {9},
   number = {18},
   pages = {1159--1164},
   year = {1995},
   month = aug,
   url = {http://dx.doi.org/10.1142/S0217984995001157},
   doi = {10.1142/S0217984995001157},
   key = {fds328537}
}

@article{fds328455,
   author = {Alemi, A and Baldassi, C and Brunel, N and Zecchina, R},
   title = {A Three-Threshold Learning Rule Approaches the Maximal
             Capacity of Recurrent Neural Networks.},
   journal = {PLoS computational biology},
   volume = {11},
   number = {8},
   pages = {e1004439},
   year = {2015},
   month = aug,
   url = {http://dx.doi.org/10.1371/journal.pcbi.1004439},
   abstract = {Understanding the theoretical foundations of how memories
             are encoded and retrieved in neural populations is a central
             challenge in neuroscience. A popular theoretical scenario
             for modeling memory function is the attractor neural network
             scenario, whose prototype is the Hopfield model. The model
             simplicity and the locality of the synaptic update rules
             come at the cost of a poor storage capacity, compared with
             the capacity achieved with perceptron learning algorithms.
             Here, by transforming the perceptron learning rule, we
             present an online learning rule for a recurrent neural
             network that achieves near-maximal storage capacity without
             an explicit supervisory error signal, relying only upon
             locally accessible information. The fully-connected network
             consists of excitatory binary neurons with plastic recurrent
             connections and non-plastic inhibitory feedback stabilizing
             the network dynamics; the memory patterns to be memorized
             are presented online as strong afferent currents, producing
             a bimodal distribution for the neuron synaptic inputs.
             Synapses corresponding to active inputs are modified as a
             function of the value of the local fields with respect to
             three thresholds. Above the highest threshold, and below the
             lowest threshold, no plasticity occurs. In between these two
             thresholds, potentiation/depression occurs when the local
             field is above/below an intermediate threshold. We simulated
             and analyzed a network of binary neurons implementing this
             rule and measured its storage capacity for different sizes
             of the basins of attraction. The storage capacity obtained
             through numerical simulations is shown to be close to the
             value predicted by analytical calculations. We also measured
             the dependence of capacity on the strength of external
             inputs. Finally, we quantified the statistics of the
             resulting synaptic connectivity matrix, and found that both
             the fraction of zero weight synapses and the degree of
             symmetry of the weight matrix increase with the number of
             stored patterns.},
   doi = {10.1371/journal.pcbi.1004439},
   key = {fds328455}
}

@article{fds328549,
   author = {Amit, DJ and Brunel, N},
   title = {Adequate input for learning in attractor neural
             networks},
   journal = {Network: Computation in Neural Systems (Informa)},
   volume = {4},
   number = {2},
   pages = {177--194},
   year = {1993},
   month = jan,
   url = {http://dx.doi.org/10.1088/0954-898X_4_2_003},
   doi = {10.1088/0954-898X_4_2_003},
   key = {fds328549}
}

@article{fds328464,
   author = {Hertäg, L and Durstewitz, D and Brunel, N},
   title = {Analytical approximations of the firing rate of an adaptive
             exponential integrate-and-fire neuron in the presence of
             synaptic noise.},
   journal = {Frontiers in Computational Neuroscience},
   volume = {8},
   pages = {116},
   year = {2014},
   month = jan,
   url = {http://dx.doi.org/10.3389/fncom.2014.00116},
   abstract = {Computational models offer a unique tool for understanding
             the network-dynamical mechanisms which mediate between
             physiological and biophysical properties, and behavioral
             function. A traditional challenge in computational
             neuroscience is, however, that simple neuronal models which
             can be studied analytically fail to reproduce the diversity
             of electrophysiological behaviors seen in real neurons,
             while detailed neuronal models which do reproduce such
             diversity are intractable analytically and computationally
             expensive. A number of intermediate models have been
             proposed whose aim is to capture the diversity of firing
             behaviors and spike times of real neurons while entailing
             the simplest possible mathematical description. One such
             model is the exponential integrate-and-fire neuron with
             spike rate adaptation (aEIF) which consists of two
             differential equations for the membrane potential (V) and an
             adaptation current (w). Despite its simplicity, it can
             reproduce a wide variety of physiologically observed spiking
             patterns, can be fit to physiological recordings
             quantitatively, and, once done so, is able to predict spike
             times on traces not used for model fitting. Here we compute
             the steady-state firing rate of aEIF in the presence of
             Gaussian synaptic noise, using two approaches. The first
             approach is based on the 2-dimensional Fokker-Planck
             equation that describes the (V,w)-probability distribution,
             which is solved using an expansion in the ratio between the
             time constants of the two variables. The second is based on
             the firing rate of the EIF model, which is averaged over the
             distribution of the w variable. These analytically derived
             closed-form expressions were tested on simulations from a
             large variety of model cells quantitatively fitted to in
             vitro electrophysiological recordings from pyramidal cells
             and interneurons. Theoretical predictions closely agreed
             with the firing rate of the simulated cells fed with
             in-vivo-like synaptic noise.},
   doi = {10.3389/fncom.2014.00116},
   key = {fds328464}
}

@article{fds328450,
   author = {De Pittà, M and Brunel, N and Volterra, A},
   title = {Astrocytes: Orchestrating synaptic plasticity?},
   journal = {Neuroscience},
   volume = {323},
   pages = {43--61},
   year = {2016},
   month = may,
   url = {http://dx.doi.org/10.1016/j.neuroscience.2015.04.001},
   abstract = {Synaptic plasticity is the capacity of a preexisting
             connection between two neurons to change in strength as a
             function of neural activity. Because synaptic plasticity is
             the major candidate mechanism for learning and memory, the
             elucidation of its constituting mechanisms is of crucial
             importance in many aspects of normal and pathological brain
             function. In particular, a prominent aspect that remains
             debated is how the plasticity mechanisms, that encompass a
             broad spectrum of temporal and spatial scales, come to play
             together in a concerted fashion. Here we review and discuss
             evidence that pinpoints to a possible non-neuronal, glial
             candidate for such orchestration: the regulation of synaptic
             plasticity by astrocytes.},
   doi = {10.1016/j.neuroscience.2015.04.001},
   key = {fds328450}
}

@article{fds328910,
   author = {Tartaglia, EM and Brunel, N},
   title = {Bistability and up/down state alternations in
             inhibition-dominated randomly connected networks of {LIF}
             neurons.},
   journal = {Scientific Reports},
   volume = {7},
   number = {1},
   pages = {11916},
   year = {2017},
   month = sep,
   url = {http://dx.doi.org/10.1038/s41598-017-12033-y},
   abstract = {Electrophysiological recordings in cortex in vivo have
             revealed a rich variety of dynamical regimes ranging from
             irregular asynchronous states to a diversity of synchronized
             states, depending on species, anesthesia, and external
             stimulation. The average population firing rate in these
             states is typically low. We study analytically and
             numerically a network of sparsely connected excitatory and
             inhibitory integrate-and-fire neurons in the
             inhibition-dominated, low firing rate regime. For
             sufficiently high values of the external input, the network
             exhibits an asynchronous low firing frequency state (L).
             Depending on synaptic time constants, we show that two
             scenarios may occur when external inputs are decreased: (1)
             the L state can destabilize through a Hopf bifurcation as the
             external input is decreased, leading to synchronized
             oscillations spanning δ to β frequencies; (2) the
             network can reach a bistable region, between the low firing
             frequency network state (L) and a quiescent one (Q). Adding
             an adaptation current to excitatory neurons leads to
             spontaneous alternations between L and Q states, similar to
             experimental observations on UP and DOWN states
             alternations.},
   doi = {10.1038/s41598-017-12033-y},
   key = {fds328910}
}

@article{fds328452,
   author = {Bouvier, G and Higgins, D and Spolidoro, M and Carrel, D and
             Mathieu, B and Léna, C and Dieudonné, S and Barbour, B and
             Brunel, N and Casado, M},
   title = {Burst-Dependent Bidirectional Plasticity in the Cerebellum
             Is Driven by Presynaptic {NMDA} Receptors.},
   journal = {Cell Reports},
   volume = {15},
   number = {1},
   pages = {104--116},
   year = {2016},
   month = apr,
   url = {http://dx.doi.org/10.1016/j.celrep.2016.03.004},
   abstract = {Numerous studies have shown that cerebellar function is
             related to the plasticity at the synapses between parallel
             fibers and Purkinje cells. How specific input patterns
             determine plasticity outcomes, as well as the biophysics
             underlying plasticity of these synapses, remain unclear.
             Here, we characterize the patterns of activity that lead to
             postsynaptically expressed LTP using both in vivo and in
             vitro experiments. Similar to the requirements of LTD, we
             find that high-frequency bursts are necessary to trigger LTP
             and that this burst-dependent plasticity depends on
             presynaptic NMDA receptors and nitric oxide (NO) signaling.
             We provide direct evidence for calcium entry through
             presynaptic NMDA receptors in a subpopulation of parallel
             fiber varicosities. Finally, we develop and experimentally
             verify a mechanistic plasticity model based on NO and
             calcium signaling. The model reproduces plasticity outcomes
             from data and predicts the effect of arbitrary patterns of
             synaptic inputs on Purkinje cells, thereby providing a
             unified description of plasticity.},
   doi = {10.1016/j.celrep.2016.03.004},
   key = {fds328452}
}

@article{fds328468,
   author = {Graupner, M and Brunel, N},
   title = {Calcium-based plasticity model explains sensitivity of
             synaptic changes to spike pattern, rate, and dendritic
             location.},
   journal = {Proceedings of the National Academy of Sciences of the
             United States of America},
   volume = {109},
   number = {10},
   pages = {3991--3996},
   year = {2012},
   month = mar,
   url = {http://dx.doi.org/10.1073/pnas.1109359109},
   abstract = {Multiple stimulation protocols have been found to be
             effective in changing synaptic efficacy by inducing
             long-term potentiation or depression. In many of those
             protocols, increases in postsynaptic calcium concentration
             have been shown to play a crucial role. However, it is still
             unclear whether and how the dynamics of the postsynaptic
             calcium alone determine the outcome of synaptic plasticity.
             Here, we propose a calcium-based model of a synapse in which
             potentiation and depression are activated above calcium
             thresholds. We show that this model gives rise to a large
             diversity of spike timing-dependent plasticity curves, most
             of which have been observed experimentally in different
             systems. It accounts quantitatively for plasticity outcomes
             evoked by protocols involving patterns with variable spike
             timing and firing rate in hippocampus and neocortex.
             Furthermore, it allows us to predict that differences in
             plasticity outcomes in different studies are due to
             differences in parameters defining the calcium dynamics. The
             model provides a mechanistic understanding of how various
             stimulation protocols provoke specific synaptic changes
             through the dynamics of calcium concentration and thresholds
             implementing in simplified fashion protein signaling
             cascades, leading to long-term potentiation and long-term
             depression. The combination of biophysical realism and
             analytical tractability makes it the ideal candidate to
             study plasticity at the synapse, neuron, and network
             levels.},
   doi = {10.1073/pnas.1109359109},
   key = {fds328468}
}

@article{fds328487,
   author = {Barbieri, F and Brunel, N},
   title = {Can attractor network models account for the statistics of
             firing during persistent activity in prefrontal
             cortex?},
   journal = {Frontiers in Neuroscience},
   volume = {2},
   number = {1},
   pages = {114--122},
   year = {2008},
   month = jul,
   url = {http://dx.doi.org/10.3389/neuro.01.003.2008},
   abstract = {Persistent activity observed in neurophysiological
             experiments in monkeys is thought to be the neuronal
             correlate of working memory. Over the last decade, network
             modellers have strived to reproduce the main features of
             these experiments. In particular, attractor network models
             have been proposed in which there is a coexistence between a
             non-selective attractor state with low background activity
             with selective attractor states in which sub-groups of
             neurons fire at rates which are higher (but not much higher)
             than background rates. A recent detailed statistical
             analysis of the data seems however to challenge such
             attractor models: the data indicates that firing during
             persistent activity is highly irregular (with an average CV
             larger than 1), while models predict a more regular firing
             process (CV smaller than 1). We discuss here recent
             proposals that allow to reproduce this feature of the
             experiments.},
   doi = {10.3389/neuro.01.003.2008},
   key = {fds328487}
}

@article{fds328500,
   author = {Geisler, C and Brunel, N and Wang, X-J},
   title = {Contributions of intrinsic membrane dynamics to fast network
             oscillations with irregular neuronal discharges.},
   journal = {Journal of neurophysiology},
   volume = {94},
   number = {6},
   pages = {4344--4361},
   year = {2005},
   month = dec,
   url = {http://dx.doi.org/10.1152/jn.00510.2004},
   abstract = {During fast oscillations in the local field potential
             (40-100 Hz gamma, 100-200 Hz sharp-wave ripples) single
             cortical neurons typically fire irregularly at rates that
             are much lower than the oscillation frequency. Recent
             computational studies have provided a mathematical
             description of such fast oscillations, using the leaky
             integrate-and-fire (LIF) neuron model. Here, we extend this
             theoretical framework to populations of more realistic
             Hodgkin-Huxley-type conductance-based neurons. In a noisy
             network of GABAergic neurons that are connected randomly and
             sparsely by chemical synapses, coherent oscillations emerge
             with a frequency that depends sensitively on the single
             cell's membrane dynamics. The population frequency can be
             predicted analytically from the synaptic time constants and
             the preferred phase of discharge during the oscillatory
             cycle of a single cell subjected to noisy sinusoidal input.
             The latter depends significantly on the single cell's
             membrane properties and can be understood in the context of
             the simplified exponential integrate-and-fire (EIF) neuron.
             We find that 200-Hz oscillations can be generated, provided
             the effective input conductance of single cells is large, so
             that the single neuron's phase shift is sufficiently small.
             In a two-population network of excitatory pyramidal cells
             and inhibitory neurons, recurrent excitation can either
             decrease or increase the population rhythmic frequency,
             depending on whether in a neuron the excitatory synaptic
             current follows or precedes the inhibitory synaptic current
             in an oscillatory cycle. Detailed single-cell properties
             have a substantial impact on population oscillations, even
             though rhythmicity does not originate from pacemaker neurons
             and is an emergent network phenomenon.},
   doi = {10.1152/jn.00510.2004},
   key = {fds328500}
}

@article{fds328542,
   author = {Amit, DJ and Brunel, N and Tsodyks, MV},
   title = {Correlations of cortical hebbian reverberations: Theory
             versus experiment},
   journal = {The Journal of neuroscience : the official journal of the
             Society for Neuroscience},
   volume = {14},
   number = {11 Pt 1},
   pages = {6435--6445},
   year = {1994},
   month = nov,
   internal-note = {Duplicate record: same article as entry fds328544; consider
             merging and keeping a single key.},
   abstract = {Interpreting recent single-unit recordings of delay
             activities in delayed match-to-sample experiments in
             anterior ventral temporal (AVT) cortex of monkeys in terms
             of reverberation dynamics, we present a model neural network
             of quasi-realistic elements that reproduces the empirical
             results in great detail. Information about the contiguity of
             successive stimuli in the training sequence, representing
             the fact that training is done on a set of uncorrelated
             stimuli presented in a fixed temporal sequence, is embedded
             in the synaptic structure. The model reproduces quite
             accurately the correlations between delay activity
             distributions corresponding to stimulation with the
             uncorrelated stimuli used for training. It reproduces also
             the activity distributions of spike rates on sample cells as
             a function of the stimulating pattern. It is, in our view,
             the first time that a computational phenomenon, represented
             on the neurophysiological level, is reproduced in all its
             quantitative aspects. The model is then used to make
             predictions about further features of the physiology of such
             experiments. Those include further properties of the
             correlations, features of selective cells as discriminators
             of stimuli provoking different delay activity distributions,
             and activity distributions among the neurons in a delay
             activity produced by a given pattern. The model has
             predictive implications also for the dependence of the delay
             activities on different training protocols. Finally, we
             discuss the perspectives of the interplay between such
             models and neurophysiology as well as its limitations and
             possible extensions.},
   key = {fds328542}
}

@article{fds328544,
   author = {Amit, DJ and Brunel, N and Tsodyks, MV},
   title = {Correlations of cortical Hebbian reverberations: theory
             versus experiment.},
   journal = {The Journal of neuroscience : the official journal of the
             Society for Neuroscience},
   volume = {14},
   number = {11 Pt 1},
   pages = {6435--6445},
   year = {1994},
   month = nov,
   internal-note = {Duplicate record: same article as entry fds328542; consider
             merging and keeping a single key.},
   abstract = {Interpreting recent single-unit recordings of delay
             activities in delayed match-to-sample experiments in
             anterior ventral temporal (AVT) cortex of monkeys in terms
             of reverberation dynamics, we present a model neural network
             of quasi-realistic elements that reproduces the empirical
             results in great detail. Information about the contiguity of
             successive stimuli in the training sequence, representing
             the fact that training is done on a set of uncorrelated
             stimuli presented in a fixed temporal sequence, is embedded
             in the synaptic structure. The model reproduces quite
             accurately the correlations between delay activity
             distributions corresponding to stimulation with the
             uncorrelated stimuli used for training. It reproduces also
             the activity distributions of spike rates on sample cells as
             a function of the stimulating pattern. It is, in our view,
             the first time that a computational phenomenon, represented
             on the neurophysiological level, is reproduced in all its
             quantitative aspects. The model is then used to make
             predictions about further features of the physiology of such
             experiments. Those include further properties of the
             correlations, features of selective cells as discriminators
             of stimuli provoking different delay activity distributions,
             and activity distributions among the neurons in a delay
             activity produced by a given pattern. The model has
             predictive implications also for the dependence of the delay
             activities on different training protocols. Finally, we
             discuss the perspectives of the interplay between such
             models and neurophysiology as well as its limitations and
             possible extensions.},
   key = {fds328544}
}

@article{fds328473,
   author = {Mazzoni, A and Brunel, N and Cavallari, S and Logothetis, NK and
             Panzeri, S},
   title = {Cortical dynamics during naturalistic sensory stimulations:
             experiments and models.},
   journal = {Journal of Physiology - Paris},
   volume = {105},
   number = {1-3},
   pages = {2--15},
   year = {2011},
   month = jan,
   url = {http://dx.doi.org/10.1016/j.jphysparis.2011.07.014},
   abstract = {We report the results of our experimental and theoretical
             investigations of the neural response dynamics in primary
             visual cortex (V1) during naturalistic visual stimulation.
             We recorded Local Field Potentials (LFPs) and spiking
             activity from V1 of anaesthetized macaques during binocular
             presentation of Hollywood color movies. We analyzed these
             recordings with information theoretic methods, and found
             that visual information was encoded mainly by two bands of
             LFP responses: the network fluctuations measured by the
             phase and power of low-frequency (less than 12 Hz) LFPs; and
             fast gamma-range (50-100 Hz) oscillations. Both the power
             and phase of low frequency LFPs carried information largely
             complementary to that carried by spikes, whereas gamma range
             oscillations carried information largely redundant to that
             of spikes. To interpret these results within a quantitative
             theoretical framework, we then simulated a sparsely
             connected recurrent network of excitatory and inhibitory
             neurons receiving slowly varying naturalistic inputs, and we
             determined how the LFPs generated by the network encoded
             information about the inputs. We found that this simulated
             recurrent network reproduced well the experimentally
             observed dependency of LFP information upon frequency. This
             network encoded the overall strength of the input into the
             power of gamma-range oscillations generated by
             inhibitory-excitatory neural interactions, and encoded slow
             variations in the input by entraining the network LFP at the
             corresponding frequency. This dynamical behavior accounted
             quantitatively for the independent information carried by
             high and low frequency LFPs, and for the experimentally
             observed cross-frequency coupling between phase of slow LFPs
             and the power of gamma LFPs. We also present new results
             showing that the model's dynamics also accounted for the
             extra visual information that the low-frequency LFP phase of
             spike firing carries beyond that carried by spike rates.
             Overall, our results suggest biological mechanisms by which
             cortex can multiplex information about naturalistic sensory
             environments.},
   doi = {10.1016/j.jphysparis.2011.07.014},
   key = {fds328473}
}

@article{fds328499,
   Author = {Brunel, N},
   Title = {Course 10 Network models of memory},
   Journal = {Les Houches},
   Volume = {80},
   Number = {C},
   Pages = {407--476},
   Year = {2005},
   Month = {December},
   url = {http://dx.doi.org/10.1016/S0924-8099(05)80016-2},
   Doi = {10.1016/S0924-8099(05)80016-2},
   Key = {fds328499}
}

@article{fds328533,
   Author = {Brunel, N},
   Title = {Cross-correlations in sparsely connected recurrent networks
             of spiking neurons},
   Journal = {Lecture Notes in Computer Science},
   Volume = {1327},
   Pages = {31--36},
   Year = {1997},
   Month = {January},
   ISBN = {3540636315},
   Abstract = {We study the dynamics of sparsely connected recurrent
             networks composed of excitatory and inhibitory
             integrate-and-fire (IF) neurons firing at low rates, and in
             particular cross-correlations (CC) between spike times of
             pairs of neurons using both numerical simulations and a
             recent theory. CCs exhibit damped oscillations with a
             frequency which depends on synaptic time constants.
             Individual CCs are shown to depend weakly on synaptic
             connectivity. They depend more strongly on the firing rates
             of individual neurons.},
   Key = {fds328533}
}

@article{fds328490,
   Author = {Brunel, N},
   Title = {Daniel Amit (1938--2007).},
   Journal = {Network: Computation in Neural Systems (Informa)},
   Volume = {19},
   Number = {1},
   Pages = {3--8},
   Year = {2008},
   Month = {January},
   url = {http://dx.doi.org/10.1080/09548980801915391},
   Doi = {10.1080/09548980801915391},
   Key = {fds328490}
}

@article{fds328506,
   Author = {Brunel, N},
   Title = {Dynamics and plasticity of stimulus-selective persistent
             activity in cortical network models.},
   Journal = {Cerebral Cortex},
   Volume = {13},
   Number = {11},
   Pages = {1151--1161},
   Year = {2003},
   Month = {November},
   url = {http://dx.doi.org/10.1093/cercor/bhg096},
   Abstract = {Persistent neuronal activity is widespread in many areas of
             the cerebral cortex of monkeys performing cognitive tasks
             with a working memory component. Modeling studies have
             helped understanding of the conditions under which
             persistent activity can be sustained in cortical circuits.
             Here, we first review several basic models of persistent
             activity, including bistable models with excitation only and
             multistable models for working memory of a discrete set of
             pictures or objects with structured excitation and global
             inhibition. In many experiments, persistent activity has
             been shown to be subject to changes due to associative
             learning. In cortical network models, Hebbian learning
             shapes the synaptic structure and, in turn, the properties
             of persistent activity when pictures are associated together
             in the course of a task. It is shown how the theoretical
             models can reproduce basic experimental findings of
             neurophysiological recordings from inferior temporal and
             perirhinal cortices obtained using the following
             experimental protocols: (i) the pair-associate task; (ii)
             the pair-associate task with color switch; and (iii) the
             delay match to sample task with a fixed sequence of
             samples.},
   Doi = {10.1093/cercor/bhg096},
   Key = {fds328506}
}

@article{fds328530,
   Author = {Amit, D and Brunel, N},
   Title = {Dynamics of a recurrent network of spiking neurons before
             and following learning},
   Journal = {Network: Computation in Neural Systems (Informa)},
   Volume = {8},
   Number = {4},
   Pages = {373--404},
   Year = {1997},
   Month = {November},
   url = {http://dx.doi.org/10.1088/0954-898X/8/4/003},
   Doi = {10.1088/0954-898X/8/4/003},
   Key = {fds328530}
}

@article{fds328543,
   Author = {Brunel, N},
   Title = {Dynamics of an attractor neural network converting temporal
             into spatial correlations},
   Journal = {Network: Computation in Neural Systems (Informa)},
   Volume = {5},
   Number = {4},
   Pages = {449--470},
   Year = {1994},
   Month = {November},
   url = {http://dx.doi.org/10.1088/0954-898X/5/4/003},
   Doi = {10.1088/0954-898X/5/4/003},
   Key = {fds328543}
}

@article{fds328547,
   Author = {Brunel, N},
   Title = {Dynamics of an attractor neural network converting temporal
             into spatial correlations},
   Journal = {Network: Computation in Neural Systems (Informa)},
   Volume = {5},
   Number = {4},
   Pages = {449--470},
   Year = {1994},
   Month = {November},
   url = {http://dx.doi.org/10.1088/0954-898X/5/4/003},
   Doi = {10.1088/0954-898X/5/4/003},
   internal-note = {NOTE(review): duplicate of entry fds328543 (same
             article); DOI normalized from the malformed underscore form
             10.1088/0954-898X_5_4_003 and Month aligned with the print
             issue -- consider removing one of the two keys},
   Key = {fds328547}
}

@article{fds328472,
   Author = {Ledoux, E and Brunel, N},
   Title = {Dynamics of networks of excitatory and inhibitory neurons in
             response to time-dependent inputs.},
   Journal = {Frontiers in Computational Neuroscience},
   Volume = {5},
   Pages = {25},
   Year = {2011},
   Month = {January},
   url = {http://dx.doi.org/10.3389/fncom.2011.00025},
   Abstract = {We investigate the dynamics of recurrent networks of
             excitatory (E) and inhibitory (I) neurons in the presence of
             time-dependent inputs. The dynamics is characterized by the
             network dynamical transfer function, i.e., how the
             population firing rate is modulated by sinusoidal inputs at
             arbitrary frequencies. Two types of networks are studied and
             compared: (i) a Wilson-Cowan type firing rate model; and
             (ii) a fully connected network of leaky integrate-and-fire
             (LIF) neurons, in a strong noise regime. We first
             characterize the region of stability of the "asynchronous
             state" (a state in which population activity is constant in
             time when external inputs are constant) in the space of
             parameters characterizing the connectivity of the network.
             We then systematically characterize the qualitative
             behaviors of the dynamical transfer function, as a function
             of the connectivity. We find that the transfer function can
             be either low-pass, or with a single or double resonance,
             depending on the connection strengths and synaptic time
             constants. Resonances appear when the system is close to
             Hopf bifurcations, that can be induced by two separate
             mechanisms: the I-I connectivity and the E-I connectivity.
             Double resonances can appear when excitatory delays are
             larger than inhibitory delays, due to the fact that two
             distinct instabilities exist with a finite gap between the
             corresponding frequencies. In networks of LIF neurons,
             changes in external inputs and external noise are shown to
             be able to change qualitatively the network transfer
             function. Firing rate models are shown to exhibit the same
             diversity of transfer functions as the LIF network, provided
             delays are present. They can also exhibit input-dependent
             changes of the transfer function, provided a suitable static
             non-linearity is incorporated.},
   Doi = {10.3389/fncom.2011.00025},
   internal-note = {NOTE(review): Month may be an export default rather
             than the true publication month -- confirm against the
             publisher record; Pages holds an article number, not a
             range},
   Key = {fds328472}
}

@article{fds328518,
   Author = {Brunel, N},
   Title = {Dynamics of networks of randomly connected excitatory and
             inhibitory spiking neurons.},
   Journal = {Journal of Physiology - Paris},
   Volume = {94},
   Number = {5-6},
   Pages = {445--463},
   Year = {2000},
   Month = {September},
   url = {http://dx.doi.org/10.1016/s0928-4257(00)01084-6},
   Abstract = {Recent advances in the understanding of the dynamics of
             populations of spiking neurones are reviewed. These studies
             shed light on how a population of neurones can follow
             arbitrary variations in input stimuli, how the dynamics of
             the population depends on the type of noise, and how
             recurrent connections influence the dynamics. The importance
             of inhibitory feedback for the generation of irregularity in
             single cell behaviour is emphasized. Examples of computation
             that recurrent networks with excitatory and inhibitory cells
             can perform are then discussed. Maintenance of a network
             state as an attractor of the system is discussed as a model
             for working memory function, in both object and spatial
             modalities. These models can be used to interpret and make
             predictions about electrophysiological data in the awake
             monkey.},
   Doi = {10.1016/s0928-4257(00)01084-6},
   Key = {fds328518}
}

@article{fds328520,
   Author = {Brunel, N},
   Title = {Dynamics of sparsely connected networks of excitatory and
             inhibitory spiking neurons.},
   Journal = {Journal of Computational Neuroscience},
   Volume = {8},
   Number = {3},
   Pages = {183--208},
   Year = {2000},
   Month = {May},
   url = {http://dx.doi.org/10.1023/a:1008925309027},
   Abstract = {The dynamics of networks of sparsely connected excitatory
             and inhibitory integrate-and-fire neurons are studied
             analytically. The analysis reveals a rich repertoire of
             states, including synchronous states in which neurons fire
             regularly; asynchronous states with stationary global
             activity and very irregular individual cell activity; and
             states in which the global activity oscillates but
             individual cells fire irregularly, typically at rates lower
             than the global oscillation frequency. The network can
             switch between these states, provided the external
             frequency, or the balance between excitation and inhibition,
             is varied. Two types of network oscillations are observed.
             In the fast oscillatory state, the network frequency is
             almost fully controlled by the synaptic time scale. In the
             slow oscillatory state, the network frequency depends mostly
             on the membrane time constant. Finite size effects in the
             asynchronous state are also discussed.},
   Doi = {10.1023/a:1008925309027},
   Key = {fds328520}
}

@article{fds328513,
   Author = {Fourcaud, N and Brunel, N},
   Title = {Dynamics of the firing probability of noisy
             integrate-and-fire neurons.},
   Journal = {Neural Computation},
   Volume = {14},
   Number = {9},
   Pages = {2057--2110},
   Year = {2002},
   Month = {September},
   url = {http://dx.doi.org/10.1162/089976602320264015},
   Abstract = {Cortical neurons in vivo undergo a continuous bombardment
             due to synaptic activity, which acts as a major source of
             noise. Here, we investigate the effects of the noise
             filtering by synapses with various levels of realism on
             integrate-and-fire neuron dynamics. The noise input is
             modeled by white (for instantaneous synapses) or colored
             (for synapses with a finite relaxation time) noise.
             Analytical results for the modulation of firing probability
             in response to an oscillatory input current are obtained by
             expanding a Fokker-Planck equation for small parameters of
             the problem - when both the amplitude of the modulation is
             small compared to the background firing rate and the
             synaptic time constant is small compared to the membrane
             time constant. We report here the detailed calculations
             showing that if a synaptic decay time constant is included
             in the synaptic current model, the firing-rate modulation of
             the neuron due to an oscillatory input remains finite in the
             high-frequency limit with no phase lag. In addition, we
             characterize the low-frequency behavior and the behavior of
             the high-frequency limit for intermediate decay times. We
             also characterize the effects of introducing a rise time to
             the synaptic currents and the presence of several synaptic
             receptors with different kinetics. In both cases, we
             determine, using numerical simulations, an effective decay
             time constant that describes the neuronal response
             completely.},
   Doi = {10.1162/089976602320264015},
   Key = {fds328513}
}

@article{fds328502,
   Author = {Fourcaud-Trocmé, N and Brunel, N},
   Title = {Dynamics of the instantaneous firing rate in response to
             changes in input statistics.},
   Journal = {Journal of Computational Neuroscience},
   Volume = {18},
   Number = {3},
   Pages = {311--321},
   Year = {2005},
   Month = {June},
   url = {http://dx.doi.org/10.1007/s10827-005-0337-8},
   Abstract = {We review and extend recent results on the instantaneous
             firing rate dynamics of simplified models of spiking neurons
             in response to noisy current inputs. It has been shown
             recently that the response of the instantaneous firing rate
             to small amplitude oscillations in the mean inputs depends
             in the large frequency limit f on the spike initiation
             dynamics. A particular simplified model, the exponential
             integrate-and-fire (EIF) model, has a response that decays
             as 1/f in the large frequency limit and describes very well
             the response of conductance-based models with a
             Hodgkin-Huxley type fast sodium current. Here, we show that
             the response of the EIF instantaneous firing rate also
             decays as 1/f in the case of an oscillation in the variance
             of the inputs for both white and colored noise. We then
             compute the initial transient response of the firing rate of
             the EIF model to a step change in its mean inputs and/or in
             the variance of its inputs. We show that in both cases the
             response speed is proportional to the neuron stationary
             firing rate and inversely proportional to a 'spike slope
             factor' Delta(T) that controls the sharpness of spike
             initiation: as 1/Delta(T) for a step change in mean inputs,
             and as 1/Delta(T) (2) for a step change in the variance in
             the inputs.},
   Doi = {10.1007/s10827-005-0337-8},
   Key = {fds328502}
}

@article{fds328548,
   Author = {Brunel, N},
   Title = {Effect of synapse dilution on the memory retrieval in
             structured attractor neural networks},
   Journal = {Journal de Physique, I},
   Volume = {3},
   Number = {8},
   Pages = {1693--1715},
   Year = {1993},
   Month = {August},
   url = {http://dx.doi.org/10.1051/jp1:1993210},
   Doi = {10.1051/jp1:1993210},
   Key = {fds328548}
}

@article{fds328514,
   Author = {Brunel, N and Wang, XJ},
   Title = {Effects of neuromodulation in a cortical network model of
             object working memory dominated by recurrent
             inhibition.},
   Journal = {Journal of Computational Neuroscience},
   Volume = {11},
   Number = {1},
   Pages = {63--85},
   Year = {2001},
   Month = {July},
   url = {http://dx.doi.org/10.1023/a:1011204814320},
   Abstract = {Experimental evidence suggests that the maintenance of an
             item in working memory is achieved through persistent
             activity in selective neural assemblies of the cortex. To
             understand the mechanisms underlying this phenomenon, it is
             essential to investigate how persistent activity is affected
             by external inputs or neuromodulation. We have addressed
             these questions using a recurrent network model of object
             working memory. Recurrence is dominated by inhibition,
             although persistent activity is generated through recurrent
             excitation in small subsets of excitatory neurons. Our main
             findings are as follows. (1) Because of the strong feedback
             inhibition, persistent activity shows an inverted U shape as
             a function of increased external drive to the network. (2) A
             transient external excitation can switch off a network from
             a selective persistent state to its spontaneous state. (3)
             The maintenance of the sample stimulus in working memory is
             not affected by intervening stimuli (distractors) during the
             delay period, provided the stimulation intensity is not
             large. On the other hand, if stimulation intensity is large
             enough, distractors disrupt sample-related persistent
             activity, and the network is able to maintain a memory only
             of the last shown stimulus. (4) A concerted modulation of
             GABA(A) and NMDA conductances leads to a decrease of
             spontaneous activity but an increase of persistent activity;
             the enhanced signal-to-noise ratio is shown to increase the
             resistance of the network to distractors. (5) Two mechanisms
             are identified that produce an inverted U shaped dependence
             of persistent activity on modulation. The present study
             therefore points to several mechanisms that enhance the
             signal-to-noise ratio in working memory states. These
             mechanisms could be implemented in the prefrontal cortex by
             dopaminergic projections from the midbrain.},
   Doi = {10.1023/a:1011204814320},
   Key = {fds328514}
}

@article{fds328515,
   Author = {Brunel, N and Chance, FS and Fourcaud, N and Abbott,
             LF},
   Title = {Effects of synaptic noise and filtering on the frequency
             response of spiking neurons.},
   Journal = {Physical Review Letters},
   Volume = {86},
   Number = {10},
   Pages = {2186--2189},
   Year = {2001},
   Month = {March},
   url = {http://dx.doi.org/10.1103/physrevlett.86.2186},
   Abstract = {Noise can have a significant impact on the response dynamics
             of a nonlinear system. For neurons, the primary source of
             noise comes from background synaptic input activity. If this
             is approximated as white noise, the amplitude of the
             modulation of the firing rate in response to an input
             current oscillating at frequency omega decreases as 1/square
             root[omega] and lags the input by 45 degrees in phase.
             However, if filtering due to realistic synaptic dynamics is
             included, the firing rate is modulated by a finite amount
             even in the limit omega-->infinity and the phase lag is
             eliminated. Thus, through its effect on noise inputs,
             realistic synaptic dynamics can ensure unlagged neuronal
             responses to high-frequency inputs.},
   Doi = {10.1103/physrevlett.86.2186},
   Key = {fds328515}
}

@article{fds328495,
   Author = {Baldassi, C and Braunstein, A and Brunel, N and Zecchina,
             R},
   Title = {Efficient supervised learning in networks with binary
             synapses.},
   Journal = {Proceedings of the National Academy of Sciences of the
             United States of America},
   Volume = {104},
   Number = {26},
   Pages = {11079--11084},
   Year = {2007},
   Month = {June},
   url = {http://dx.doi.org/10.1073/pnas.0700324104},
   Abstract = {Recent experimental studies indicate that synaptic changes
             induced by neuronal activity are discrete jumps between a
             small number of stable states. Learning in systems with
             discrete synapses is known to be a computationally hard
             problem. Here, we study a neurobiologically plausible
             on-line learning algorithm that derives from belief
             propagation algorithms. We show that it performs remarkably
             well in a model neuron with binary synapses, and a finite
             number of "hidden" states per synapse, that has to learn a
             random classification task. Such a system is able to learn a
             number of associations close to the theoretical limit in
             time that is sublinear in system size. This is to our
             knowledge the first on-line algorithm that is able to
             achieve efficiently a finite number of patterns learned per
             binary synapse. Furthermore, we show that performance is
             optimal for a finite number of hidden states that becomes
             very small for sparse coding. The algorithm is similar to
             the standard "perceptron" learning algorithm, with an
             additional rule for synaptic transitions that occur only if
             a currently presented pattern is "barely correct." In this
             case, the synaptic changes are metaplastic only (change in
             hidden states and not in actual synaptic state), stabilizing
             the synapse in its current state. Finally, we show that a
             system with two visible states and K hidden states is much
             more robust to noise than a system with K visible states. We
             suggest that this rule is sufficiently simple to be easily
             implemented by neurobiological systems or in
             hardware.},
   Doi = {10.1073/pnas.0700324104},
   Key = {fds328495}
}

@article{fds328482,
   Author = {Dugué, GP and Brunel, N and Hakim, V and Schwartz, E and Chat, M and Lévesque, M and Courtemanche, R and Léna, C and Dieudonné,
             S},
   Title = {Electrical coupling mediates tunable low-frequency
             oscillations and resonance in the cerebellar {Golgi} cell
             network.},
   Journal = {Neuron},
   Volume = {61},
   Number = {1},
   Pages = {126--139},
   Year = {2009},
   Month = {January},
   url = {http://dx.doi.org/10.1016/j.neuron.2008.11.028},
   Abstract = {Tonic motor control involves oscillatory synchronization of
             activity at low frequency (5-30 Hz) throughout the
             sensorimotor system, including cerebellar areas. We
             investigated the mechanisms underpinning cerebellar
             oscillations. We found that Golgi interneurons, which gate
             information transfer in the cerebellar cortex input layer,
             are extensively coupled through electrical synapses. When
             depolarized in vitro, these neurons displayed low-frequency
             oscillatory synchronization, imposing rhythmic inhibition
             onto granule cells. Combining experiments and modeling, we
             show that electrical transmission of the spike
             afterhyperpolarization is the essential component for
             oscillatory population synchronization. Rhythmic firing
             arises in spite of strong heterogeneities, is frequency
             tuned by the mean excitatory input to Golgi cells, and
             displays pronounced resonance when the modeled network is
             driven by oscillating inputs. In vivo, unitary Golgi cell
             activity was found to synchronize with low-frequency LFP
             oscillations occurring during quiet waking. These results
             suggest a major role for Golgi cells in coordinating
             cerebellar sensorimotor integration during oscillatory
             interactions.},
   Doi = {10.1016/j.neuron.2008.11.028},
   Key = {fds328482}
}

@article{fds328485,
   Author = {Mazzoni, A and Panzeri, S and Logothetis, NK and Brunel,
             N},
   Title = {Encoding of naturalistic stimuli by local field potential
             spectra in networks of excitatory and inhibitory
             neurons.},
   Journal = {PLoS Computational Biology},
   Volume = {4},
   Number = {12},
   Pages = {e1000239},
   Year = {2008},
   Month = {December},
   url = {http://dx.doi.org/10.1371/journal.pcbi.1000239},
   Abstract = {Recordings of local field potentials (LFPs) reveal that the
             sensory cortex displays rhythmic activity and fluctuations
             over a wide range of frequencies and amplitudes. Yet, the
             role of this kind of activity in encoding sensory
             information remains largely unknown. To understand the rules
             of translation between the structure of sensory stimuli and
             the fluctuations of cortical responses, we simulated a
             sparsely connected network of excitatory and inhibitory
             neurons modeling a local cortical population, and we
             determined how the LFPs generated by the network encode
             information about input stimuli. We first considered simple
             static and periodic stimuli and then naturalistic input
             stimuli based on electrophysiological recordings from the
             thalamus of anesthetized monkeys watching natural movie
             scenes. We found that the simulated network produced
             stimulus-related LFP changes that were in striking agreement
             with the LFPs obtained from the primary visual cortex.
             Moreover, our results demonstrate that the network encoded
             static input spike rates into gamma-range oscillations
             generated by inhibitory-excitatory neural interactions and
             encoded slow dynamic features of the input into slow LFP
             fluctuations mediated by stimulus-neural interactions. The
             model cortical network processed dynamic stimuli with
             naturalistic temporal structure by using low and high
             response frequencies as independent communication channels,
             again in agreement with recent reports from visual cortex
             responses to naturalistic movies. One potential function of
             this frequency decomposition into independent information
             channels operated by the cortical network may be that of
             enhancing the capacity of the cortical column to encode our
             complex sensory environment.},
   Doi = {10.1371/journal.pcbi.1000239},
   Key = {fds328485}
}

@article{fds328474,
   Author = {Hamaguchi, K and Riehle, A and Brunel, N},
   Title = {Estimating network parameters from combined dynamics of
             firing rate and irregularity of single neurons.},
   Journal = {Journal of Neurophysiology},
   Volume = {105},
   Number = {1},
   Pages = {487--500},
   Year = {2011},
   Month = {January},
   url = {http://dx.doi.org/10.1152/jn.00858.2009},
   Abstract = {High firing irregularity is a hallmark of cortical neurons
             in vivo, and modeling studies suggest a balance of
             excitation and inhibition is necessary to explain this high
             irregularity. Such a balance must be generated, at least
             partly, from local interconnected networks of excitatory and
             inhibitory neurons, but the details of the local network
             structure are largely unknown. The dynamics of the neural
             activity depends on the local network structure; this in
             turn suggests the possibility of estimating network
             structure from the dynamics of the firing statistics. Here
             we report a new method to estimate properties of the local
             cortical network from the instantaneous firing rate and
             irregularity (CV(2)) under the assumption that recorded
             neurons are a part of a randomly connected sparse network.
             The firing irregularity, measured in monkey motor cortex,
             exhibits two features; many neurons show relatively stable
             firing irregularity in time and across different task
             conditions; the time-averaged CV(2) is widely distributed
             from quasi-regular to irregular (CV(2) = 0.3-1.0). For each
             recorded neuron, we estimate the three parameters of a local
             network [balance of local excitation-inhibition, number of
             recurrent connections per neuron, and excitatory
             postsynaptic potential (EPSP) size] that best describe the
             dynamics of the measured firing rates and irregularities.
             Our analysis shows that optimal parameter sets form a
             two-dimensional manifold in the three-dimensional parameter
             space that is confined for most of the neurons to the
             inhibition-dominated region. High irregularity neurons tend
             to be more strongly connected to the local network, either
             in terms of larger EPSP and inhibitory PSP size or larger
             number of recurrent connections, compared with the low
             irregularity neurons, for a given excitatory/inhibitory
             balance. Incorporating either synaptic short-term depression
             or conductance-based synapses leads many low CV(2) neurons
             to move to the excitation-dominated region as well as to an
             increase of EPSP size.},
   Doi = {10.1152/jn.00858.2009},
   Key = {fds328474}
}

@article{fds328522,
   Author = {Brunel, N and Hakim, V},
   Title = {Fast global oscillations in networks of integrate-and-fire
             neurons with low firing rates.},
   Journal = {Neural Computation},
   Volume = {11},
   Number = {7},
   Pages = {1621--1671},
   Year = {1999},
   Month = {October},
   url = {http://dx.doi.org/10.1162/089976699300016179},
   Abstract = {We study analytically the dynamics of a network of sparsely
             connected inhibitory integrate-and-fire neurons in a regime
             where individual neurons emit spikes irregularly and at a
             low rate. In the limit when the number of neurons -->
             infinity, the network exhibits a sharp transition between a
             stationary and an oscillatory global activity regime where
             neurons are weakly synchronized. The activity becomes
             oscillatory when the inhibitory feedback is strong enough.
             The period of the global oscillation is found to be mainly
             controlled by synaptic times but depends also on the
             characteristics of the external input. In large but finite
             networks, the analysis shows that global oscillations of
             finite coherence time generically exist both above and below
             the critical inhibition threshold. Their characteristics are
             determined as functions of systems parameters in these two
             different regions. The results are found to be in good
             agreement with numerical simulations.},
   Doi = {10.1162/089976699300016179},
   Key = {fds328522}
}

@article{fds328521,
   Author = {Brunel, N and Wang, XJ},
   Title = {Fast network oscillations with intermittent principal cell
             firing in a model of a recurrent excitatory-inhibitory
             circuit},
   Journal = {European Journal of Neuroscience},
   Volume = {12},
   Pages = {79--79},
   Year = {2000},
   Key = {fds328521}
}

@article{fds328524,
   Author = {Brunel, N and Sergi, S},
   Title = {Firing frequency of leaky integrate-and-fire neurons with
             synaptic current dynamics.},
   Journal = {Journal of Theoretical Biology},
   Volume = {195},
   Number = {1},
   Pages = {87--95},
   Year = {1998},
   Month = {November},
   url = {http://dx.doi.org/10.1006/jtbi.1998.0782},
   Abstract = {We consider a model of an integrate-and-fire neuron with
             synaptic current dynamics, in which the synaptic time
             constant tau' is much smaller than the membrane time
             constant tau. We calculate analytically the firing frequency
             of such a neuron for inputs described by a random Gaussian
             process. We find that the first order correction to the
             frequency due to tau' is proportional to the square root of
             the ratio between these time constants radicaltau'/tau. This
             implies that the correction is important even when the
             synaptic time constant is small compared with that of the
             potential. The frequency of a neuron with tau'>0 can be
             reduced to that of the basic IF neuron (corresponding to
             tau'=1) using an "effective" threshold which has a linear
             dependence on radical tau'/tau. Numerical simulations show a
             very good agreement with the analytical result, and permit
             an extrapolation of the "effective" threshold to higher
             orders in radical tau'/tau. The obtained frequency agrees
             with simulation data for a wide range of
             parameters.},
   Doi = {10.1006/jtbi.1998.0782},
   Key = {fds328524}
}

@article{fds328508,
   Author = {Brunel, N and Latham, PE},
   Title = {Firing rate of the noisy quadratic integrate-and-fire
             neuron.},
   Journal = {Neural Computation},
   Volume = {15},
   Number = {10},
   Pages = {2281--2306},
   Year = {2003},
   Month = {October},
   url = {http://dx.doi.org/10.1162/089976603322362365},
   Abstract = {We calculate the firing rate of the quadratic
             integrate-and-fire neuron in response to a colored noise
             input current. Such an input current is a good approximation
             to the noise due to the random bombardment of spikes, with
             the correlation time of the noise corresponding to the decay
             time of the synapses. The key parameter that determines the
             firing rate is the ratio of the correlation time of the
             colored noise, tau(s), to the neuronal time constant,
             tau(m). We calculate the firing rate exactly in two limits:
             when the ratio, tau(s)/tau(m), goes to zero (white noise)
             and when it goes to infinity. The correction to the short
             correlation time limit is O(tau(s)/tau(m)), which is
             qualitatively different from that of the leaky
             integrate-and-fire neuron, where the correction is
             O( radical tau(s)/tau(m)). The difference is due to the
             different boundary conditions of the probability density
             function of the membrane potential of the neuron at firing
             threshold. The correction to the long correlation time limit
             is O(tau(m)/tau(s)). By combining the short and long
             correlation time limits, we derive an expression that
             provides a good approximation to the firing rate over the
             whole range of tau(s)/tau(m) in the suprathreshold
             regime-that is, in a regime in which the average current is
             sufficient to make the cell fire. In the subthreshold
             regime, the expression breaks down somewhat when tau(s)
             becomes large compared to tau(m).},
   Doi = {10.1162/089976603322362365},
   Key = {fds328508}
}

% NOTE(review): Pages holds a Physical Review E article number (eid 051916),
% not a page range, so no range-dash correction applies here.
@article{fds328511,
   Author = {Brunel, N and Hakim, V and Richardson, MJE},
   Title = {Firing-rate resonance in a generalized integrate-and-fire
             neuron with subthreshold resonance.},
   Journal = {Physical Review E - Statistical, Nonlinear, and Soft Matter
             Physics},
   Volume = {67},
   Number = {5 Pt 1},
   Pages = {051916},
   Year = {2003},
   Month = {May},
   url = {http://dx.doi.org/10.1103/physreve.67.051916},
   Abstract = {Neurons that exhibit a peak at finite frequency in their
             membrane potential response to oscillatory inputs are
             widespread in the nervous system. However, the influence of
             this subthreshold resonance on spiking properties has not
             yet been thoroughly analyzed. To this end, generalized
             integrate-and-fire models are introduced that reproduce at
             the linear level the subthreshold behavior of any given
             conductance-based model. A detailed analysis is presented of
             the simplest resonant model of this kind that has two
             variables: the membrane potential and a supplementary
             voltage-gated resonant variable. The firing-rate modulation
             created by a noisy weak oscillatory drive, mimicking an in
             vivo environment, is computed numerically and analytically
             when the dynamics of the resonant variable is slow compared
             to that of the membrane potential. The results show that the
             firing-rate modulation is shaped by the subthreshold
             resonance. For weak noise, the firing-rate modulation has a
             minimum near the preferred subthreshold frequency. For
             higher noise, such as that prevailing in vivo, the
             firing-rate modulation peaks near the preferred subthreshold
             frequency.},
   Doi = {10.1103/physreve.67.051916},
   Key = {fds328511}
}

% Encyclopedia chapter (Booktitle/Publisher/Editor/ISBN present): the correct
% entry type is @incollection, not @article, so Booktitle/Editor are rendered.
@incollection{fds328466,
   Author = {Brunel, N and Hakim, V},
   Title = {Fokker-Planck Equation.},
   Booktitle = {Encyclopedia of Computational Neuroscience},
   Publisher = {Springer},
   Editor = {Jaeger, D and Jung, R},
   Year = {2014},
   ISBN = {978-1-4614-7320-6},
   url = {http://dx.doi.org/10.1007/978-1-4614-7320-6_60-2},
   Doi = {10.1007/978-1-4614-7320-6_60-2},
   Key = {fds328466}
}

% NOTE(review): Pages is an eid (e1001056), no range to correct. Journal casing
% ("PLoS computational biology") follows the source database's catalog form;
% confirm preferred casing ("PLoS Computational Biology") before normalizing.
@article{fds328471,
   Author = {Ostojic, S and Brunel, N},
   Title = {From spiking neuron models to linear-nonlinear
             models.},
   Journal = {PLoS computational biology},
   Volume = {7},
   Number = {1},
   Pages = {e1001056},
   Year = {2011},
   Month = {January},
   url = {http://dx.doi.org/10.1371/journal.pcbi.1001056},
   Abstract = {Neurons transform time-varying inputs into action potentials
             emitted stochastically at a time dependent rate. The mapping
             from current input to output firing rate is often
             represented with the help of phenomenological models such as
             the linear-nonlinear (LN) cascade, in which the output
             firing rate is estimated by applying to the input
             successively a linear temporal filter and a static
             non-linear transformation. These simplified models leave out
             the biophysical details of action potential generation. It
             is not a priori clear to which extent the input-output
             mapping of biophysically more realistic, spiking neuron
             models can be reduced to a simple linear-nonlinear cascade.
             Here we investigate this question for the leaky
             integrate-and-fire (LIF), exponential integrate-and-fire
             (EIF) and conductance-based Wang-Buzsáki models in presence
             of background synaptic activity. We exploit available
             analytic results for these models to determine the
             corresponding linear filter and static non-linearity in a
             parameter-free form. We show that the obtained functions are
             identical to the linear filter and static non-linearity
             determined using standard reverse correlation analysis. We
             then quantitatively compare the output of the corresponding
             linear-nonlinear cascade with numerical simulations of
             spiking neurons, systematically varying the parameters of
             input signal and background noise. We find that the LN
             cascade provides accurate estimates of the firing rates of
             spiking neurons in most of parameter space. For the EIF and
             Wang-Buzsáki models, we show that the LN cascade can be
             reduced to a firing rate model, the timescale of which we
             determine analytically. Finally we introduce an adaptive
             timescale rate model in which the timescale of the linear
             filter depends on the instantaneous firing rate. This model
             leads to highly accurate estimates of instantaneous firing
             rates.},
   Doi = {10.1371/journal.pcbi.1001056},
   Key = {fds328471}
}

@article{fds328512,
   Author = {Richardson, MJE and Brunel, N and Hakim, V},
   Title = {From subthreshold to firing-rate resonance.},
   Journal = {Journal of neurophysiology},
   Volume = {89},
   Number = {5},
   Pages = {2538--2554},
   Year = {2003},
   Month = {May},
   url = {http://dx.doi.org/10.1152/jn.00955.2002},
   Abstract = {Many types of neurons exhibit subthreshold resonance.
             However, little is known about whether this frequency
             preference influences spike emission. Here, the link between
             subthreshold resonance and firing rate is examined in the
             framework of conductance-based models. A classification of
             the subthreshold properties of a general class of neurons is
             first provided. In particular, a class of neurons is
             identified in which the input impedance exhibits a
             suppression at a nonzero low frequency as well as a peak at
             higher frequency. The analysis is then extended to the
             effect of subthreshold resonance on the dynamics of the
             firing rate. The considered input current comprises a
             background noise term, mimicking the massive synaptic
             bombardment in vivo. Of interest is the modulatory effect an
             additional weak oscillating current has on the instantaneous
             firing rate. When the noise is weak and firing regular, the
             frequency most preferentially modulated is the firing rate
             itself. Conversely, when the noise is strong and firing
             irregular, the modulation is strongest at the subthreshold
             resonance frequency. These results are demonstrated for two
             specific conductance-based models and for a generalization
             of the integrate-and-fire model that captures subthreshold
             resonance. They suggest that resonant neurons are able to
             communicate their frequency preference to postsynaptic
             targets when the level of noise is comparable to that
             prevailing in vivo.},
   Doi = {10.1152/jn.00955.2002},
   Key = {fds328512}
}

@article{fds328535,
   Author = {Brunel, N},
   Title = {Hebbian learning of context in recurrent neural
             networks.},
   Journal = {Neural Computation},
   Volume = {8},
   Number = {8},
   Pages = {1677--1710},
   Year = {1996},
   Month = {November},
   url = {http://dx.doi.org/10.1162/neco.1996.8.8.1677},
   Abstract = {Single electrode recording in the inferotemporal cortex of
             monkeys during delayed visual memory tasks provide evidence
             for attractor dynamics in the observed region. The
             persistent elevated delay activities could be internal
             representations of features of the learned visual stimuli
             shown to the monkey during training. When uncorrelated
             stimuli are presented during training in a fixed sequence,
             these experiments display significant correlations between
             the internal representations. Recently a simple model of
             attractor neural network has reproduced quantitatively the
             measured correlations. An underlying assumption of the model
             is that the synaptic matrix formed during the training phase
             contains in its efficacies information about the contiguity
             of persistent stimuli in the training sequence. We present
             here a simple unsupervised learning dynamics that produces
             such a synaptic matrix if sequences of stimuli are
             repeatedly presented to the network at fixed order. The
             resulting matrix is then shown to convert temporal
             correlations during training into spatial correlations
             between attractors. The scenario is that, in the presence of
             selective delay activity, at the presentation of each
             stimulus, the activity distribution in the neural assembly
             contain information of both the current stimulus and the
             previous one (carried by the attractor). Thus the recurrent
             synaptic matrix can code not only for each of the stimuli
             presented to the network but also for their context. We
             combine the idea that for learning to be effective, synaptic
             modification should be stochastic, with the fact that
             attractors provide learnable information about two
             consecutive stimuli. We calculate explicitly the probability
             distribution of synaptic efficacies as a function of
             training protocol, that is, the order in which stimuli are
             presented to the network. We then solve for the dynamics of
             a network composed of integrate-and-fire excitatory and
             inhibitory neurons with a matrix of synaptic collaterals
             resulting from the learning dynamics. The network has a
             stable spontaneous activity, and stable delay activity
             develops after a critical learning stage. The availability
             of a learning dynamics makes possible a number of
             experimental predictions for the dependence of the delay
             activity distributions and the correlations between them, on
             the learning stage and the learning protocol. In particular
             it makes specific predictions for pair-associates delay
             experiments.},
   Doi = {10.1162/neco.1996.8.8.1677},
   Key = {fds328535}
}

@article{fds328488,
   Author = {de Solages, C and Szapiro, G and Brunel, N and Hakim, V and Isope, P and Buisseret, P and Rousseau, C and Barbour, B and Léna,
             C},
   Title = {High-frequency organization and synchrony of activity in the
             purkinje cell layer of the cerebellum.},
   Journal = {Neuron},
   Volume = {58},
   Number = {5},
   Pages = {775--788},
   Year = {2008},
   Month = {June},
   url = {http://dx.doi.org/10.1016/j.neuron.2008.05.008},
   Abstract = {The cerebellum controls complex, coordinated, and rapid
             movements, a function requiring precise timing abilities.
             However, the network mechanisms that underlie the temporal
             organization of activity in the cerebellum are largely
             unexplored, because in vivo recordings have usually targeted
             single units. Here, we use tetrode and multisite recordings
             to demonstrate that Purkinje cell activity is synchronized
             by a high-frequency (approximately 200 Hz) population
             oscillation. We combine pharmacological experiments and
             modeling to show how the recurrent inhibitory connections
             between Purkinje cells are sufficient to generate these
             oscillations. A key feature of these oscillations is a fixed
             population frequency that is independent of the firing rates
             of the individual cells. Convergence in the deep cerebellar
             nuclei of Purkinje cell activity, synchronized by these
             oscillations, likely organizes temporally the cerebellar
             output.},
   Doi = {10.1016/j.neuron.2008.05.008},
   Key = {fds328488}
}

@article{fds328479,
   Author = {Ostojic, S and Brunel, N and Hakim, V},
   Title = {How connectivity, background activity, and synaptic
             properties shape the cross-correlation between spike
             trains.},
   Journal = {The Journal of neuroscience : the official journal of the
             Society for Neuroscience},
   Volume = {29},
   Number = {33},
   Pages = {10234--10253},
   Year = {2009},
   Month = {August},
   url = {http://dx.doi.org/10.1523/jneurosci.1275-09.2009},
   Abstract = {Functional interactions between neurons in vivo are often
             quantified by cross-correlation functions (CCFs) between
             their spike trains. It is therefore essential to understand
             quantitatively how CCFs are shaped by different factors,
             such as connectivity, synaptic parameters, and background
             activity. Here, we study the CCF between two neurons using
             analytical calculations and numerical simulations. We
             quantify the role of synaptic parameters, such as peak
             conductance, decay time, and reversal potential, and analyze
             how various patterns of connectivity influence CCF shapes.
             In particular, we find that the symmetry of the CCF
             distinguishes in general, but not always, the case of shared
             inputs between two neurons from the case in which they are
             directly synaptically connected. We systematically examine
             the influence of background synaptic inputs from the
             surrounding network that set the baseline firing statistics
             of the neurons and modulate their response properties. We
             find that variations in the background noise modify the
             amplitude of the cross-correlation function as strongly as
             variations of synaptic strength. In particular, we show that
             the postsynaptic neuron spiking regularity has a pronounced
             influence on CCF amplitude. This suggests an efficient and
             flexible mechanism for modulating functional
             interactions.},
   Doi = {10.1523/jneurosci.1275-09.2009},
   Key = {fds328479}
}

@article{fds328497,
   Author = {Brunel, N and Hansel, D},
   Title = {How noise affects the synchronization properties of
             recurrent networks of inhibitory neurons.},
   Journal = {Neural Computation},
   Volume = {18},
   Number = {5},
   Pages = {1066--1110},
   Year = {2006},
   Month = {May},
   url = {http://dx.doi.org/10.1162/neco.2006.18.5.1066},
   Abstract = {GABAergic interneurons play a major role in the emergence of
             various types of synchronous oscillatory patterns of
             activity in the central nervous system. Motivated by these
             experimental facts, modeling studies have investigated
             mechanisms for the emergence of coherent activity in
             networks of inhibitory neurons. However, most of these
             studies have focused either when the noise in the network is
             absent or weak or in the opposite situation when it is
             strong. Hence, a full picture of how noise affects the
             dynamics of such systems is still lacking. The aim of this
             letter is to provide a more comprehensive understanding of
             the mechanisms by which the asynchronous states in large,
             fully connected networks of inhibitory neurons are
             destabilized as a function of the noise level. Three types
             of single neuron models are considered: the leaky
             integrate-and-fire (LIF) model, the exponential
             integrate-and-fire (EIF) model, and conductance-based models
             involving sodium and potassium Hodgkin-Huxley (HH) currents.
             We show that in all models, the instabilities of the
             asynchronous state can be classified in two classes. The
             first one consists of clustering instabilities, which exist
             in a restricted range of noise. These instabilities lead to
             synchronous patterns in which the population of neurons is
             broken into clusters of synchronously firing neurons. The
             irregularity of the firing patterns of the neurons is weak.
             The second class of instabilities, termed oscillatory firing
             rate instabilities, exists at any value of noise. They lead
             to cluster state at low noise. As the noise is increased,
             the instability occurs at larger coupling, and the pattern
             of firing that emerges becomes more irregular. In the regime
             of high noise and strong coupling, these instabilities lead
             to stochastic oscillations in which neurons fire in an
             approximately Poisson way with a common instantaneous
             probability of firing that oscillates in
             time.},
   Doi = {10.1162/neco.2006.18.5.1066},
   Key = {fds328497}
}

@article{fds328505,
   Author = {Fourcaud-Trocmé, N and Hansel, D and van Vreeswijk, C and Brunel,
             N},
   Title = {How spike generation mechanisms determine the neuronal
             response to fluctuating inputs.},
   Journal = {The Journal of neuroscience : the official journal of the
             Society for Neuroscience},
   Volume = {23},
   Number = {37},
   Pages = {11628--11640},
   Year = {2003},
   Month = {December},
   Abstract = {This study examines the ability of neurons to track
             temporally varying inputs, namely by investigating how the
             instantaneous firing rate of a neuron is modulated by a
             noisy input with a small sinusoidal component with frequency
             (f). Using numerical simulations of conductance-based
             neurons and analytical calculations of one-variable
             nonlinear integrate-and-fire neurons, we characterized the
             dependence of this modulation on f. For sufficiently high
             noise, the neuron acts as a low-pass filter. The modulation
             amplitude is approximately constant for frequencies up to a
             cutoff frequency, fc, after which it decays. The cutoff
             frequency increases almost linearly with the firing rate.
             For higher frequencies, the modulation amplitude decays as
             C/falpha, where the power alpha depends on the spike
             initiation mechanism. For conductance-based models, alpha =
             1, and the prefactor C depends solely on the average firing
             rate and a spike "slope factor," which determines the
             sharpness of the spike initiation. These results are
             attributable to the fact that near threshold, the sodium
             activation variable can be approximated by an exponential
             function. Using this feature, we propose a simplified
             one-variable model, the "exponential integrate-and-fire
             neuron," as an approximation of a conductance-based model.
             We show that this model reproduces the dynamics of a simple
             conductance-based model extremely well. Our study shows how
             an intrinsic neuronal property (the characteristics of fast
             sodium channels) determines the speed with which neurons can
             track changes in input.},
   Key = {fds328505}
}

@article{fds328454,
   Author = {Lim, S and McKee, JL and Woloszyn, L and Amit, Y and Freedman, DJ and Sheinberg, DL and Brunel, N},
   Title = {Inferring learning rules from distributions of firing rates
             in cortical neurons.},
   Journal = {Nature Neuroscience},
   Volume = {18},
   Number = {12},
   Pages = {1804--1810},
   Year = {2015},
   Month = {December},
   url = {http://dx.doi.org/10.1038/nn.4158},
   Abstract = {Information about external stimuli is thought to be stored
             in cortical circuits through experience-dependent
             modifications of synaptic connectivity. These modifications
             of network connectivity should lead to changes in neuronal
             activity as a particular stimulus is repeatedly encountered.
             Here we ask what plasticity rules are consistent with the
             differences in the statistics of the visual response to
             novel and familiar stimuli in inferior temporal cortex, an
             area underlying visual object recognition. We introduce a
             method that allows one to infer the dependence of the
             presumptive learning rule on postsynaptic firing rate, and
             we show that the inferred learning rule exhibits depression
             for low postsynaptic rates and potentiation for high rates.
             The threshold separating depression from potentiation is
             strongly correlated with both mean and s.d. of the firing
             rate distribution. Finally, we show that network models
             implementing a rule extracted from data show stable learning
             dynamics and lead to sparser representations of
             stimuli.},
   Doi = {10.1038/nn.4158},
   Key = {fds328454}
}

@article{fds328550,
   Author = {Brunel, N and Nadal, J-P and Toulouse, G},
   Title = {Information capacity of a perceptron},
   Journal = {Journal of Physics A: Mathematical and General},
   Volume = {25},
   Number = {19},
   Pages = {5017--5038},
   Year = {1992},
   Month = {October},
   url = {http://dx.doi.org/10.1088/0305-4470/25/19/015},
   Doi = {10.1088/0305-4470/25/19/015},
   Key = {fds328550}
}

@article{fds328496,
   Author = {Barbieri, F and Brunel, N},
   Title = {Irregular persistent activity induced by synaptic excitatory
             feedback.},
   Journal = {Frontiers in Computational Neuroscience},
   Volume = {1},
   Pages = {5},
   Year = {2007},
   Month = {January},
   url = {http://dx.doi.org/10.3389/neuro.10.005.2007},
   Abstract = {Neurophysiological experiments on monkeys have reported
             highly irregular persistent activity during the performance
             of an oculomotor delayed-response task. These experiments
             show that during the delay period the coefficient of
             variation (CV) of interspike intervals (ISI) of prefrontal
             neurons is above 1, on average, and larger than during the
             fixation period. In the present paper, we show that this
             feature can be reproduced in a network in which persistent
             activity is induced by excitatory feedback, provided that
             (i) the post-spike reset is close enough to threshold, (ii)
             synaptic efficacies are a non-linear function of the
             pre-synaptic firing rate. Non-linearity between pre-synaptic
             rate and effective synaptic strength is implemented by a
             standard short-term depression mechanism (STD). First, we
             consider the simplest possible network with excitatory
             feedback: a fully connected homogeneous network of
             excitatory leaky integrate-and-fire neurons, using both
             numerical simulations and analytical techniques. The results
             are then confirmed in a network with selective excitatory
             neurons and inhibition. In both the cases there is a large
             range of values of the synaptic efficacies for which the
             statistics of firing of single cells is similar to
             experimental data.},
   Doi = {10.3389/neuro.10.005.2007},
   Key = {fds328496}
}

@article{fds328449,
   Author = {Brunel, N},
   Title = {Is cortical connectivity optimized for storing
             information?},
   Journal = {Nature Neuroscience},
   Volume = {19},
   Number = {5},
   Pages = {749--755},
   Year = {2016},
   Month = {May},
   url = {http://dx.doi.org/10.1038/nn.4286},
   Abstract = {Cortical networks are thought to be shaped by
             experience-dependent synaptic plasticity. Theoretical
             studies have shown that synaptic plasticity allows a network
             to store a memory of patterns of activity such that they
             become attractors of the dynamics of the network. Here we
             study the properties of the excitatory synaptic connectivity
             in a network that maximizes the number of stored patterns of
             activity in a robust fashion. We show that the resulting
             synaptic connectivity matrix has the following properties:
             it is sparse, with a large fraction of zero synaptic weights
             ('potential' synapses); bidirectionally coupled pairs of
             neurons are over-represented in comparison to a random
             network; and bidirectionally connected pairs have stronger
             synapses on average than unidirectionally connected pairs.
             All these features reproduce quantitatively available data
             on connectivity in cortex. This suggests synaptic
             connectivity in cortex is optimized to store a large number
             of attractor states in a robust fashion.},
   Doi = {10.1038/nn.4286},
   Key = {fds328449}
}

@article{fds328493,
   Author = {Brunel, N and van Rossum, MCW},
   Title = {Lapicque's 1907 paper: from frogs to integrate-and-fire.},
   Journal = {Biological Cybernetics},
   Volume = {97},
   Number = {5-6},
   Pages = {337--339},
   Year = {2007},
   Month = {December},
   url = {http://dx.doi.org/10.1007/s00422-007-0190-0},
   Abstract = {Exactly 100 years ago, Louis Lapicque published a paper on
             the excitability of nerves that is often cited in the
             context of integrate-and-fire neurons. We discuss Lapicque's
             contributions along with a translation of the original
             publication.},
   Doi = {10.1007/s00422-007-0190-0},
   Key = {fds328493}
}

@article{fds328540,
   Author = {Brunel, N and Amit, DJ},
   Title = {Learning internal representations in an analog attractor
             neural network},
   Journal = {International Journal of Neural Systems, Supplementary
             Issue, 1995},
   Pages = {19--23},
   Year = {1995},
   ISBN = {981-02-2482-6},
   Key = {fds328540}
}

% NOTE(review): stray dagger ("Amit†") removed from the author field — it was a
% mis-encoded annotation mark, not part of the name, and breaks name parsing.
@article{fds328538,
   Author = {Amit, D and Brunel, N},
   Title = {Learning internal representations in an attractor neural
             network with analogue neurons},
   Journal = {Network: Computation in Neural Systems (Informa)},
   Volume = {6},
   Number = {3},
   Pages = {359--388},
   Year = {1995},
   Month = {August},
   url = {http://dx.doi.org/10.1088/0954-898X/6/3/004},
   Doi = {10.1088/0954-898X/6/3/004},
   Key = {fds328538}
}

% NOTE(review): apparent duplicate of fds328538 (same work, same volume/pages);
% this copy carries the legacy underscore-form IOP DOI. Consider merging the two
% entries; DOI left as-is in case the legacy alias is the registered one.
@article{fds328539,
   Author = {Amit, DJ and Brunel, N},
   Title = {Learning internal representations in an attractor neural
             network with analogue neurons},
   Journal = {Network: Computation in Neural Systems (Informa)},
   Volume = {6},
   Number = {3},
   Pages = {359--388},
   Year = {1995},
   Month = {January},
   url = {http://dx.doi.org/10.1088/0954-898X_6_3_004},
   Doi = {10.1088/0954-898X_6_3_004},
   Key = {fds328539}
}

@article{fds328448,
   Author = {Zampini, V and Liu, JK and Diana, MA and Maldonado, PP and Brunel, N and Dieudonné, S},
   Title = {Mechanisms and functional roles of glutamatergic synapse
             diversity in a cerebellar circuit.},
   Journal = {eLife},
   Volume = {5},
   Year = {2016},
   Month = {September},
   url = {http://dx.doi.org/10.7554/elife.15872},
   Abstract = {Synaptic currents display a large degree of heterogeneity of
             their temporal characteristics, but the functional role of
             such heterogeneities remains unknown. We investigated in rat
             cerebellar slices synaptic currents in Unipolar Brush Cells
             (UBCs), which generate intrinsic mossy fibers relaying
             vestibular inputs to the cerebellar cortex. We show that
             UBCs respond to sinusoidal modulations of their sensory
             input with heterogeneous amplitudes and phase shifts.
             Experiments and modeling indicate that this variability
             results both from the kinetics of synaptic glutamate
             transients and from the diversity of postsynaptic receptors.
             While phase inversion is produced by an mGluR2-activated
             outward conductance in OFF-UBCs, the phase delay of ON UBCs
             is caused by a late rebound current resulting from AMPAR
             recovery from desensitization. Granular layer network
             modeling indicates that phase dispersion of UBC responses
             generates diverse phase coding in the granule cell
             population, allowing climbing-fiber-driven Purkinje cell
             learning at arbitrary phases of the vestibular
             input.},
   Doi = {10.7554/elife.15872},
   Key = {fds328448}
}

@article{fds328477,
   Author = {Graupner, M and Brunel, N},
   Title = {Mechanisms of induction and maintenance of spike-timing
             dependent plasticity in biophysical synapse
             models.},
   Journal = {Frontiers in Computational Neuroscience},
   Volume = {4},
   Year = {2010},
   Month = {January},
   url = {http://dx.doi.org/10.3389/fncom.2010.00136},
   Abstract = {We review biophysical models of synaptic plasticity, with a
             focus on spike-timing dependent plasticity (STDP). The
             common property of the discussed models is that synaptic
             changes depend on the dynamics of the intracellular calcium
             concentration, which itself depends on pre- and postsynaptic
             activity. We start by discussing simple models in which
             plasticity changes are based directly on calcium amplitude
             and dynamics. We then consider models in which dynamic
             intracellular signaling cascades form the link between the
             calcium dynamics and the plasticity changes. Both mechanisms
             of induction of STDP (through the ability of
             pre/postsynaptic spikes to evoke changes in the state of the
             synapse) and of maintenance of the evoked changes (through
             bistability) are discussed.},
   Doi = {10.3389/fncom.2010.00136},
   Key = {fds328477}
}

@article{fds328460,
   Author = {Dubreuil, AM and Amit, Y and Brunel, N},
   Title = {Memory capacity of networks with stochastic binary
             synapses.},
   Journal = {PLoS computational biology},
   Volume = {10},
   Number = {8},
   Pages = {e1003727},
   Year = {2014},
   Month = {August},
   url = {http://dx.doi.org/10.1371/journal.pcbi.1003727},
   Abstract = {In standard attractor neural network models, specific
             patterns of activity are stored in the synaptic matrix, so
             that they become fixed point attractors of the network
             dynamics. The storage capacity of such networks has been
             quantified in two ways: the maximal number of patterns that
             can be stored, and the stored information measured in bits
             per synapse. In this paper, we compute both quantities in
             fully connected networks of N binary neurons with binary
             synapses, storing patterns with coding level [Formula: see
             text], in the large [Formula: see text] and sparse coding
             limits ([Formula: see text]). We also derive finite-size
             corrections that accurately reproduce the results of
             simulations in networks of tens of thousands of neurons.
             These methods are applied to three different scenarios: (1)
             the classic Willshaw model, (2) networks with stochastic
             learning in which patterns are shown only once (one shot
             learning), (3) networks with stochastic learning in which
             patterns are shown multiple times. The storage capacities
             are optimized over network parameters, which allows us to
             compare the performance of the different models. We show
             that finite-size effects strongly reduce the capacity, even
             for networks of realistic sizes. We discuss the implications
             of these results for memory storage in the hippocampus and
             cerebral cortex.},
   Doi = {10.1371/journal.pcbi.1003727},
   Key = {fds328460}
}

@article{fds328458,
   Author = {Higgins, D and Graupner, M and Brunel, N},
   Title = {Memory maintenance in synapses with calcium-based plasticity
             in the presence of background activity.},
   Journal = {PLoS computational biology},
   Volume = {10},
   Number = {10},
   Pages = {e1003834},
   Year = {2014},
   Month = {October},
   url = {http://dx.doi.org/10.1371/journal.pcbi.1003834},
   Abstract = {Most models of learning and memory assume that memories are
             maintained in neuronal circuits by persistent synaptic
             modifications induced by specific patterns of pre- and
             postsynaptic activity. For this scenario to be viable,
             synaptic modifications must survive the ubiquitous ongoing
             activity present in neural circuits in vivo. In this paper,
             we investigate the time scales of memory maintenance in a
             calcium-based synaptic plasticity model that has been shown
             recently to be able to fit different experimental data-sets
             from hippocampal and neocortical preparations. We find that
             in the presence of background activity on the order of 1 Hz
             parameters that fit pyramidal layer 5 neocortical data lead
             to a very fast decay of synaptic efficacy, with time scales
             of minutes. We then identify two ways in which this memory
             time scale can be extended: (i) the extracellular calcium
             concentration in the experiments used to fit the model are
             larger than estimated concentrations in vivo. Lowering
             extracellular calcium concentration to in vivo levels leads
             to an increase in memory time scales of several orders of
             magnitude; (ii) adding a bistability mechanism so that each
             synapse has two stable states at sufficiently low background
             activity leads to a further boost in memory time scale,
             since memory decay is no longer described by an exponential
             decay from an initial state, but by an escape from a
             potential well. We argue that both features are expected to
             be present in synapses in vivo. These results are obtained
             first in a single synapse connecting two independent Poisson
             neurons, and then in simulations of a large network of
             excitatory and inhibitory integrate-and-fire neurons. Our
             results emphasise the need for studying plasticity at
             physiological extracellular calcium concentration, and
             highlight the role of synaptic bi- or multistability in the
             stability of learned synaptic structures.},
   Doi = {10.1371/journal.pcbi.1003834},
   Key = {fds328458}
}

@article{fds328532,
   Author = {Amit, DJ and Brunel, N},
   Title = {Model of global spontaneous activity and local structured
             activity during delay periods in the cerebral
             cortex.},
   Journal = {Cerebral Cortex},
   Volume = {7},
   Number = {3},
   Pages = {237-252},
   Year = {1997},
   Month = {April},
   url = {http://dx.doi.org/10.1093/cercor/7.3.237},
   Abstract = {We investigate self-sustaining stable states (attractors) in
             networks of integrate-and-fire neurons. First, we study the
             stability of spontaneous activity in an unstructured
             network. It is shown that the stochastic background
             activity, of 1-5 spikes/s, is unstable if all neurons are
             excitatory. On the other hand, spontaneous activity becomes
             self-stabilizing in presence of local inhibition, given
             reasonable values of the parameters of the network. Second,
             in a network sustaining physiological spontaneous rates, we
             study the effect of learning in a local module, expressed in
             synaptic modifications in specific populations of synapses.
             We find that if the average synaptic potentiation (LTP) is
             too low, no stimulus specific activity manifests itself in
             the delay period. Instead, following the presentation and
             removal of any stimulus there is, in the local module, a
             delay activity in which all neurons selective (responding
             visually) to any of the stimuli presented for learning have
             rates which gradually increase with the amplitude of
             synaptic potentiation. When the average LTP increases beyond
             a critical value, specific local attractors (stable states)
             appear abruptly against the background of the global uniform
             spontaneous attractor. In this case the local module has two
             available types of collective delay activity: if the
             stimulus is unfamiliar, the activity is spontaneous; if it
             is similar to a learned stimulus, delay activity is
             selective. These new attractors reflect the synaptic
             structure developed during learning. In each of them a small
             population of neurons have elevated rates, which depend on
             the strength of LTP. The remaining neurons of the module
             have their activity at spontaneous rates. The predictions
             made in this paper could be checked by single unit
             recordings in delayed response experiments.},
   Doi = {10.1093/cercor/7.3.237},
   Key = {fds328532}
}

@article{fds328528,
   Author = {Brunel, N and Nadal, JP},
   Title = {Modeling memory: what do we learn from attractor neural
             networks?},
   Journal = {Comptes Rendus Biologies},
   Volume = {321},
   Number = {2-3},
   Pages = {249-252},
   Year = {1998},
   Month = {February},
   url = {http://dx.doi.org/10.1016/s0764-4469(97)89830-7},
   Abstract = {In this paper we summarize some of the main contributions of
             models of recurrent neural networks with associative memory
             properties. We compare the behavior of these attractor
             neural networks with empirical data from both physiology and
             psychology. This type of network could be used in models
             with more complex functions.},
   Doi = {10.1016/s0764-4469(97)89830-7},
   Key = {fds328528}
}

@article{fds328457,
   Author = {Tartaglia, EM and Brunel, N and Mongillo, G},
   Title = {Modulation of network excitability by persistent activity:
             how working memory affects the response to incoming
             stimuli.},
   Journal = {PLoS computational biology},
   Volume = {11},
   Number = {2},
   Pages = {e1004059},
   Year = {2015},
   Month = {February},
   url = {http://dx.doi.org/10.1371/journal.pcbi.1004059},
   Abstract = {Persistent activity and match effects are widely regarded as
             neuronal correlates of short-term storage and manipulation
             of information, with the first serving active maintenance
             and the latter supporting the comparison between memory
             contents and incoming sensory information. The mechanistic
             and functional relationship between these two basic
             neurophysiological signatures of working memory remains
             elusive. We propose that match signals are generated as a
             result of transient changes in local network excitability
             brought about by persistent activity. Neurons more active
             will be more excitable, and thus more responsive to external
             inputs. Accordingly, network responses are jointly
             determined by the incoming stimulus and the ongoing pattern
             of persistent activity. Using a spiking model network, we
             show that this mechanism is able to reproduce most of the
             experimental phenomenology of match effects as exposed by
             single-cell recordings during delayed-response tasks. The
             model provides a unified, parsimonious mechanistic account
             of the main neuronal correlates of working memory, makes
             several experimentally testable predictions, and
             demonstrates a new functional role for persistent
             activity.},
   Doi = {10.1371/journal.pcbi.1004059},
   Key = {fds328457}
}

@article{fds328453,
   Author = {De Pittà, M and Brunel, N},
   Title = {Modulation of Synaptic Plasticity by Glutamatergic
             Gliotransmission: A Modeling Study.},
   Journal = {Neural Plasticity},
   Volume = {2016},
   Pages = {7607924},
   Year = {2016},
   Month = {January},
   url = {http://dx.doi.org/10.1155/2016/7607924},
   Abstract = {Glutamatergic gliotransmission, that is, the release of
             glutamate from perisynaptic astrocyte processes in an
             activity-dependent manner, has emerged as a potentially
             crucial signaling pathway for regulation of synaptic
             plasticity, yet its modes of expression and function in vivo
             remain unclear. Here, we focus on two experimentally
             well-identified gliotransmitter pathways, (i) modulations of
             synaptic release and (ii) postsynaptic slow inward currents
             mediated by glutamate released from astrocytes, and
             investigate their possible functional relevance on synaptic
             plasticity in a biophysical model of an astrocyte-regulated
             synapse. Our model predicts that both pathways could
             profoundly affect both short- and long-term plasticity. In
             particular, activity-dependent glutamate release from
             astrocytes could dramatically change spike-timing-dependent
             plasticity, turning potentiation into depression (and vice
             versa) for the same induction protocol.},
   Doi = {10.1155/2016/7607924},
   Key = {fds328453}
}

@article{fds328525,
   Author = {Brunel, N and Nadal, JP},
   Title = {Mutual information, Fisher information, and population
             coding.},
   Journal = {Neural Computation},
   Volume = {10},
   Number = {7},
   Pages = {1731-1757},
   Year = {1998},
   Month = {October},
   url = {http://dx.doi.org/10.1162/089976698300017115},
   Abstract = {In the context of parameter estimation and model selection,
             it is only quite recently that a direct link between the
             Fisher information and information-theoretic quantities has
             been exhibited. We give an interpretation of this link
             within the standard framework of information theory. We show
             that in the context of population coding, the mutual
             information between the activity of a large array of neurons
             and a stimulus to which the neurons are tuned is naturally
             related to the Fisher information. In the light of this
             result, we consider the optimization of the tuning curves
             parameters in the case of neurons responding to a stimulus
             represented by an angular variable.},
   Doi = {10.1162/089976698300017115},
   Key = {fds328525}
}

@incollection{fds328484,
   Author = {Brunel, N and Hakim, V},
   Title = {Neuronal Dynamics.},
   Pages = {6099-6116},
   Booktitle = {Encyclopedia of Complexity and Systems Science},
   Publisher = {Springer},
   Editor = {Meyers, RA},
   Year = {2009},
   ISBN = {978-0-387-75888-6},
   url = {http://dx.doi.org/10.1007/978-0-387-30440-3_359},
   Doi = {10.1007/978-0-387-30440-3_359},
   Key = {fds328484}
}

@article{fds328456,
   Author = {Ostojic, S and Szapiro, G and Schwartz, E and Barbour, B and Brunel, N and Hakim, V},
   Title = {Neuronal morphology generates high-frequency firing
             resonance.},
   Journal = {The Journal of neuroscience : the official journal of the
             Society for Neuroscience},
   Volume = {35},
   Number = {18},
   Pages = {7056-7068},
   Year = {2015},
   Month = {May},
   url = {http://dx.doi.org/10.1523/jneurosci.3924-14.2015},
   Abstract = {The attenuation of neuronal voltage responses to
             high-frequency current inputs by the membrane capacitance is
             believed to limit single-cell bandwidth. However, neuronal
             populations subject to stochastic fluctuations can follow
             inputs beyond this limit. We investigated this apparent
             paradox theoretically and experimentally using Purkinje
             cells in the cerebellum, a motor structure that benefits
             from rapid information transfer. We analyzed the modulation
             of firing in response to the somatic injection of sinusoidal
             currents. Computational modeling suggested that, instead of
             decreasing with frequency, modulation amplitude can increase
             up to high frequencies because of cellular morphology.
             Electrophysiological measurements in adult rat slices
             confirmed this prediction and displayed a marked resonance
             at 200 Hz. We elucidated the underlying mechanism, showing
             that the two-compartment morphology of the Purkinje cell,
             interacting with a simple spiking mechanism and dendritic
             fluctuations, is sufficient to create high-frequency signal
             amplification. This mechanism, which we term
             morphology-induced resonance, is selective for somatic
             inputs, which in the Purkinje cell are exclusively
             inhibitory. The resonance sensitizes Purkinje cells in the
             frequency range of population oscillations observed in
             vivo.},
   Doi = {10.1523/jneurosci.3924-14.2015},
   Key = {fds328456}
}

@article{fds328509,
   Author = {Brunel, N and Frégnac, Y and Meunier, C and Nadal,
             J-P},
   Title = {Neuroscience and computation.},
   Journal = {Journal of Physiology - Paris},
   Volume = {97},
   Number = {4-6},
   Pages = {387-390},
   Year = {2003},
   Month = {July},
   url = {http://dx.doi.org/10.1016/j.jphysparis.2004.02.001},
   Doi = {10.1016/j.jphysparis.2004.02.001},
   Key = {fds328509}
}

@article{fds328526,
   Author = {Nadal, JP and Brunel, N and Parga, N},
   Title = {Nonlinear feedforward networks with stochastic outputs:
             infomax implies redundancy reduction.},
   Journal = {Network: Computation in Neural Systems (Informa)},
   Volume = {9},
   Number = {2},
   Pages = {207-217},
   Year = {1998},
   Month = {May},
   url = {http://dx.doi.org/10.1088/0954-898x_9_2_004},
   Abstract = {We prove that maximization of mutual information between the
             output and the input of a feedforward neural network leads
             to full redundancy reduction under the following sufficient
             conditions: (i) the input signal is a (possibly nonlinear)
             invertible mixture of independent components; (ii) there is
             no input noise; (iii) the activity of each output neuron is
             a (possibly) stochastic variable with a probability
             distribution depending on the stimulus through a
             deterministic function of the inputs (where both the
             probability distributions and the functions can be different
             from neuron to neuron); (iv) optimization of the mutual
             information is performed over all these deterministic
             functions. This result extends that obtained by Nadal and
             Parga (1994) who considered the case of deterministic
             outputs.},
   Doi = {10.1088/0954-898x_9_2_004},
   Key = {fds328526}
}

@article{fds328470,
   Author = {Roxin, A and Brunel, N and Hansel, D and Mongillo, G and van Vreeswijk,
             C},
   Title = {On the distribution of firing rates in networks of cortical
             neurons.},
   Journal = {The Journal of neuroscience : the official journal of the
             Society for Neuroscience},
   Volume = {31},
   Number = {45},
   Pages = {16217-16226},
   Year = {2011},
   Month = {November},
   url = {http://dx.doi.org/10.1523/jneurosci.1677-11.2011},
   Abstract = {The distribution of in vivo average firing rates within
             local cortical networks has been reported to be highly
             skewed and long tailed. The distribution of average
             single-cell inputs, conversely, is expected to be Gaussian
             by the central limit theorem. This raises the issue of how a
             skewed distribution of firing rates might result from a
             symmetric distribution of inputs. We argue that skewed rate
             distributions are a signature of the nonlinearity of the in
             vivo f-I curve. During in vivo conditions, ongoing synaptic
             activity produces significant fluctuations in the membrane
             potential of neurons, resulting in an expansive nonlinearity
             of the f-I curve for low and moderate inputs. Here, we
             investigate the effects of single-cell and network
             parameters on the shape of the f-I curve and, by extension,
             on the distribution of firing rates in randomly connected
             networks.},
   Doi = {10.1523/jneurosci.1677-11.2011},
   Key = {fds328470}
}

@article{fds328463,
   Author = {Tartaglia, EM and Mongillo, G and Brunel, N},
   Title = {On the relationship between persistent delay activity,
             repetition enhancement and priming.},
   Journal = {Frontiers in Psychology},
   Volume = {5},
   Pages = {1590},
   Year = {2014},
   Month = {January},
   url = {http://dx.doi.org/10.3389/fpsyg.2014.01590},
   Abstract = {Human efficiency in processing incoming stimuli (in terms of
             speed and/or accuracy) is typically enhanced by previous
             exposure to the same, or closely related stimuli-a
             phenomenon referred to as priming. In spite of the large
             body of knowledge accumulated in behavioral studies about
             the conditions conducive to priming, and its relationship
             with other forms of memory, the underlying neuronal
             correlates of priming are still under debate. The idea has
             repeatedly been advanced that a major neuronal mechanism
             supporting behaviorally-expressed priming is repetition
             suppression, a widespread reduction of spiking activity upon
             stimulus repetition which has been routinely exposed by
             single-unit recordings in non-human primates performing
             delayed-response, as well as passive fixation tasks. This
             proposal is mainly motivated by the observation that, in
             human fMRI studies, priming is associated to a significant
             reduction of the BOLD signal (widely interpreted as a proxy
             of the level of spiking activity) upon stimulus repetition.
             Here, we critically re-examine a large part of the
             electrophysiological literature on repetition suppression in
             non-human primates and find that repetition suppression is
             systematically accompanied by stimulus-selective delay
             period activity, together with repetition enhancement, an
             increase of spiking activity upon stimulus repetition in
             small neuronal populations. We argue that repetition
             enhancement constitutes a more viable candidate for a
             putative neuronal substrate of priming, and propose a
             minimal framework that links together, mechanistically and
             functionally, repetition suppression, stimulus-selective
             delay activity and repetition enhancement.},
   Doi = {10.3389/fpsyg.2014.01590},
   Key = {fds328463}
}

@article{fds328504,
   Author = {Brunel, N and Hakim, V and Isope, P and Nadal, J-P and Barbour,
             B},
   Title = {Optimal information storage and the distribution of synaptic
             weights: perceptron versus Purkinje cell.},
   Journal = {Neuron},
   Volume = {43},
   Number = {5},
   Pages = {745-757},
   Year = {2004},
   Month = {September},
   url = {http://dx.doi.org/10.1016/j.neuron.2004.08.023},
   Abstract = {It is widely believed that synaptic modifications underlie
             learning and memory. However, few studies have examined what
             can be deduced about the learning process from the
             distribution of synaptic weights. We analyze the perceptron,
             a prototypical feedforward neural network, and obtain the
             optimal synaptic weight distribution for a perceptron with
             excitatory synapses. It contains more than 50% silent
             synapses, and this fraction increases with storage
             reliability: silent synapses are therefore a necessary
             byproduct of optimizing learning and reliability. Exploiting
             the classical analogy between the perceptron and the
             cerebellar Purkinje cell, we fitted the optimal weight
             distribution to that measured for granule cell-Purkinje cell
             synapses. The two distributions agreed well, suggesting that
             the Purkinje cell can learn up to 5 kilobytes of
             information, in the form of 40,000 input-output
             associations.},
   Doi = {10.1016/j.neuron.2004.08.023},
   Key = {fds328504}
}

@article{fds328467,
   Author = {Clopath, C and Brunel, N},
   Title = {Optimal properties of analog perceptrons with excitatory
             weights.},
   Journal = {PLoS computational biology},
   Volume = {9},
   Number = {2},
   Pages = {e1002919},
   Year = {2013},
   Month = {January},
   url = {http://dx.doi.org/10.1371/journal.pcbi.1002919},
   Abstract = {The cerebellum is a brain structure which has been
             traditionally devoted to supervised learning. According to
             this theory, plasticity at the Parallel Fiber (PF) to
             Purkinje Cell (PC) synapses is guided by the Climbing fibers
             (CF), which encode an 'error signal'. Purkinje cells have
             thus been modeled as perceptrons, learning input/output
             binary associations. At maximal capacity, a perceptron with
             excitatory weights expresses a large fraction of zero-weight
             synapses, in agreement with experimental findings. However,
             numerous experiments indicate that the firing rate of
             Purkinje cells varies in an analog, not binary, manner. In
             this paper, we study the perceptron with analog inputs and
             outputs. We show that the optimal input has a sparse binary
             distribution, in good agreement with the burst firing of the
             Granule cells. In addition, we show that the weight
             distribution consists of a large fraction of silent
             synapses, as in previously studied binary perceptron models,
             and as seen experimentally.},
   Doi = {10.1371/journal.pcbi.1002919},
   Key = {fds328467}
}

@article{fds328534,
   Author = {Brunel, N and Nadal, J-P},
   Title = {Optimal tuning curves for neurons spiking as a Poisson
             process.},
   Journal = {ESANN},
   Publisher = {D-Facto public},
   Editor = {Verleysen, M},
   Year = {1997},
   ISBN = {2-9600049-7-3},
   Key = {fds328534}
}

@article{fds328516,
   Author = {Brunel, N},
   Title = {Persistent activity and the single-cell frequency-current
             curve in a cortical network model.},
   Journal = {Network: Computation in Neural Systems (Informa)},
   Volume = {11},
   Number = {4},
   Pages = {261-280},
   Year = {2000},
   Month = {November},
   url = {http://dx.doi.org/10.1088/0954-898x_11_4_302},
   Abstract = {Neurophysiological experiments indicate that working memory
             of an object is maintained by the persistent activity of
             cells in the prefrontal cortex and infero-temporal cortex of
             the monkey. This paper considers a cortical network model in
             which this persistent activity appears due to recurrent
             synaptic interactions. The conditions under which the
             magnitude of spontaneous and persistent activity are close
             to one another (as is found empirically) are investigated
             using a simplified mean-field description in which firing
             rates in these states are given by the intersections of a
             straight line with the f-I curve of a single pyramidal cell.
             The present analysis relates a network phenomenon -
             persistent activity in a 'working memory' state - to
             single-cell data which are accessible to experiment. It
             predicts that, in networks of the cerebral cortex in which
             persistent activity phenomena are observed, average synaptic
             inputs in both spontaneous and persistent activity should
             bring the cells close to firing threshold. Cells should be
             slightly sub-threshold in spontaneous activity, and slightly
             supra-threshold in persistent activity. The results are
             shown to be robust to the inclusion of inhomogeneities that
             produce wide distributions of firing rates, in both
             spontaneous and working memory states.},
   Doi = {10.1088/0954-898x_11_4_302},
   Key = {fds328516}
}

@article{fds328519,
   Author = {Brunel, N},
   Title = {Phase diagrams of sparsely connected networks of excitatory
             and inhibitory spiking neurons},
   Journal = {Neurocomputing},
   Volume = {32-33},
   Pages = {307-312},
   Year = {2000},
   Month = {June},
   url = {http://dx.doi.org/10.1016/S0925-2312(00)00179-X},
   Doi = {10.1016/S0925-2312(00)00179-X},
   Key = {fds328519}
}

@article{fds328523,
   Author = {Brunel, N and Trullier, O},
   Title = {Plasticity of directional place fields in a model of rodent
             CA3},
   Journal = {Hippocampus},
   Volume = {8},
   Number = {6},
   Pages = {651-665},
   Year = {1998},
   Month = {December},
   url = {http://dx.doi.org/10.1002/(SICI)1098-1063(1998)8:6<651::AID-HIPO8>3.0.CO;2-L},
   Abstract = {We propose a computational model of the CA3 region of the
             rat hippocampus that is able to reproduce the available
             experimental data concerning the dependence of directional
             selectivity of the place cell discharge on the environment
             and on the spatial task. The main feature of our model is a
             continuous, unsupervised Hebbian learning dynamics of
             recurrent connections, which is driven by the neuronal
             activities imposed upon the network by the
             environment-dependent external input. In our simulations,
             the environment and the movements of the rat are chosen to
             mimic those commonly observed in neurophysiological
             experiments. The environment is represented as local views
             that depend on both the position and the heading direction
             of the rat. We hypothesize that place cells are
             intrinsically directional, that is, they respond to local
             views. We show that the synaptic dynamics in the recurrent
             neural network rapidly modify the discharge correlates of
             the place cells: Cells tend to become omnidirectional place
             cells in open fields, while their directionality tends to
             get stronger in radial-arm mazes. We also find that the
             synaptic learning mechanisms account for other properties of
             place cell activity, such as an increase in the place cell
             peak firing rates as well as clustering of place fields
             during exploration. Our model makes several experimental
             predictions that can be tested using current
             techniques.},
   Doi = {10.1002/(SICI)1098-1063(1998)8:6<651::AID-HIPO8>3.0.CO;2-L},
   internal-note = {DOI completed with SICI check suffix ;2-L; duplicate
             of fds328529 -- the two records describe the same paper and
             should be merged under a single key},
   Key = {fds328523}
}

@article{fds328529,
   Author = {Brunel, N and Trullier, O},
   Title = {Plasticity of directional place fields in a model of rodent
             CA3.},
   Journal = {Hippocampus},
   Volume = {8},
   Number = {6},
   Pages = {651-665},
   Year = {1998},
   Month = {January},
   url = {http://dx.doi.org/10.1002/(sici)1098-1063(1998)8:6<651::aid-hipo8>3.0.co;2-l},
   Abstract = {We propose a computational model of the CA3 region of the
             rat hippocampus that is able to reproduce the available
             experimental data concerning the dependence of directional
             selectivity of the place cell discharge on the environment
             and on the spatial task. The main feature of our model is a
             continuous, unsupervised Hebbian learning dynamics of
             recurrent connections, which is driven by the neuronal
             activities imposed upon the network by the
             environment-dependent external input. In our simulations,
             the environment and the movements of the rat are chosen to
             mimic those commonly observed in neurophysiological
             experiments. The environment is represented as local views
             that depend on both the position and the heading direction
             of the rat. We hypothesize that place cells are
             intrinsically directional, that is, they respond to local
             views. We show that the synaptic dynamics in the recurrent
             neural network rapidly modify the discharge correlates of
             the place cells: Cells tend to become omnidirectional place
             cells in open fields, while their directionality tends to
             get stronger in radial-arm mazes. We also find that the
             synaptic learning mechanisms account for other properties of
             place cell activity, such as an increase in the place cell
             peak firing rates as well as clustering of place fields
             during exploration. Our model makes several experimental
             predictions that can be tested using current
             techniques.},
   Doi = {10.1002/(sici)1098-1063(1998)8:6<651::aid-hipo8>3.0.co;2-l},
   internal-note = {duplicate of fds328523 (same paper; that record
             lists Month December) -- merge and keep a single key},
   Key = {fds328529}
}

@incollection{fds328465,
   Author = {Brunel, N and Hakim, V},
   Title = {Population Density Models.},
   Booktitle = {Encyclopedia of Computational Neuroscience},
   Publisher = {Springer},
   Editor = {Jaeger, D and Jung, R},
   Year = {2014},
   ISBN = {978-1-4614-7320-6},
   url = {http://dx.doi.org/10.1007/978-1-4614-7320-6_74-1},
   Doi = {10.1007/978-1-4614-7320-6_74-1},
   Key = {fds328465}
}

@article{fds328541,
   Author = {Brunel, N},
   Title = {Quantitative modeling of local Hebbian reverberations in
             primate cortex},
   Journal = {International Journal of Neural Systems, Supplementary
             Issue},
   Pages = {13-17},
   Year = {1995},
   ISBN = {981-02-2482-6},
   Key = {fds328541}
}

@article{fds328498,
   Author = {Roxin, A and Brunel, N and Hansel, D},
   Title = {Rate Models with Delays and the Dynamics of Large Networks
             of Spiking Neurons},
   Journal = {Progress of Theoretical Physics Supplement},
   Volume = {161},
   Pages = {68-85},
   Year = {2006},
   url = {http://dx.doi.org/10.1143/PTPS.161.68},
   Doi = {10.1143/PTPS.161.68},
   Key = {fds328498}
}

@article{fds328546,
   Author = {Brunel, N and Zecchina, R},
   Title = {Response functions improving performance in analog attractor
             neural networks.},
   Journal = {Physical Review E - Statistical Physics, Plasmas, Fluids,
             and Related Interdisciplinary Topics},
   Volume = {49},
   Number = {3},
   Pages = {R1823-R1826},
   Year = {1994},
   Month = {March},
   url = {http://dx.doi.org/10.1103/physreve.49.r1823},
   Doi = {10.1103/physreve.49.r1823},
   Key = {fds328546}
}

@article{fds328507,
   Author = {Mongillo, G and Amit, DJ and Brunel, N},
   Title = {Retrospective and prospective persistent activity induced by
             Hebbian learning in a recurrent cortical
             network.},
   Journal = {European Journal of Neuroscience},
   Volume = {18},
   Number = {7},
   Pages = {2011-2024},
   Year = {2003},
   Month = {October},
   url = {http://dx.doi.org/10.1046/j.1460-9568.2003.02908.x},
   Abstract = {Recordings from cells in the associative cortex of monkeys
             performing visual working memory tasks link persistent
             neuronal activity, long-term memory and associative memory.
             In particular, delayed pair-associate tasks have revealed
             neuronal correlates of long-term memory of associations
             between stimuli. Here, a recurrent cortical network model
             with Hebbian plastic synapses is subjected to the
             pair-associate protocol. In a first stage, learning leads to
             the appearance of delay activity, representing individual
             images ('retrospective' activity). As learning proceeds, the
             same learning mechanism uses retrospective delay activity
             together with choice stimulus activity to potentiate
             synapses connecting neural populations representing
             associated images. As a result, the neural population
             corresponding to the pair-associate of the image presented
             is activated prior to its visual stimulation ('prospective'
             activity). The probability of appearance of prospective
             activity is governed by the strength of the inter-population
             connections, which in turn depends on the frequency of
             pairings during training. The time course of the transitions
             from retrospective to prospective activity during the delay
             period is found to depend on the fraction of slow,
             N-methyl-d-aspartate-like receptors at excitatory synapses.
             For fast recurrent excitation, transitions are abrupt; slow
             recurrent excitation renders transitions gradual. Both
             scenarios lead to a gradual rise of delay activity when
             averaged over many trials, because of the stochastic nature
             of the transitions. The model reproduces most of the
             neuro-physiological data obtained during such tasks, makes
             experimentally testable predictions and demonstrates how
             persistent activity (working memory) brings about the
             learning of long-term associations.},
   Doi = {10.1046/j.1460-9568.2003.02908.x},
   Key = {fds328507}
}

@article{fds328501,
   Author = {Roxin, A and Brunel, N and Hansel, D},
   Title = {Role of delays in shaping spatiotemporal dynamics of
             neuronal activity in large networks.},
   Journal = {Physical Review Letters},
   Volume = {94},
   Number = {23},
   Pages = {238103},
   Year = {2005},
   Month = {June},
   url = {http://dx.doi.org/10.1103/physrevlett.94.238103},
   Abstract = {We study the effect of delays on the dynamics of large
             networks of neurons. We show that delays give rise to a
             wealth of bifurcations and to a rich phase diagram, which
             includes oscillatory bumps, traveling waves, lurching waves,
             standing waves arising via a period-doubling bifurcation,
             aperiodic regimes, and regimes of multistability. We study
             the existence and the stability of the various dynamical
             patterns analytically and numerically in a simplified rate
             model as a function of the interaction parameters. The
             results derived in that framework allow us to understand the
             origin of the diversity of dynamical states observed in
             large networks of spiking neurons.},
   Doi = {10.1103/physrevlett.94.238103},
   Key = {fds328501}
}

@article{fds328478,
   Author = {Brunel, N and Lavigne, F},
   Title = {Semantic priming in a cortical network model.},
   Journal = {Journal of Cognitive Neuroscience},
   Volume = {21},
   Number = {12},
   Pages = {2300-2319},
   Year = {2009},
   Month = {December},
   url = {http://dx.doi.org/10.1162/jocn.2008.21156},
   Abstract = {Contextual recall in humans relies on the semantic
             relationships between items stored in memory. These
             relationships can be probed by priming experiments. Such
             experiments have revealed a rich phenomenology on how
             reaction times depend on various factors such as strength
             and nature of associations, time intervals between stimulus
             presentations, and so forth. Experimental protocols on
             humans present striking similarities with pair association
             task experiments in monkeys. Electrophysiological recordings
             of cortical neurons in such tasks have found two types of
             task-related activity, "retrospective" (related to a
             previously shown stimulus), and "prospective" (related to a
             stimulus that the monkey expects to appear, due to learned
             association between both stimuli). Mathematical models of
             cortical networks allow theorists to understand the link
             between the physiology of single neurons and synapses, and
             network behavior giving rise to retrospective and/or
             prospective activity. Here, we show that this type of
             network model can account for a large variety of priming
             effects. Furthermore, the model allows us to interpret
             semantic priming differences between the two hemispheres as
             depending on a single association strength
             parameter.},
   Doi = {10.1162/jocn.2008.21156},
   Key = {fds328478}
}

@article{fds328476,
   Author = {Panzeri, S and Brunel, N and Logothetis, NK and Kayser,
             C},
   Title = {Sensory neural codes using multiplexed temporal
             scales.},
   Journal = {Trends in Neurosciences},
   Volume = {33},
   Number = {3},
   Pages = {111-120},
   Year = {2010},
   Month = {March},
   url = {http://dx.doi.org/10.1016/j.tins.2009.12.001},
   Abstract = {Determining how neuronal activity represents sensory
             information is central for understanding perception. Recent
             work shows that neural responses at different timescales can
             encode different stimulus attributes, resulting in a
             temporal multiplexing of sensory information. Multiplexing
             increases the encoding capacity of neural responses, enables
             disambiguation of stimuli that cannot be discriminated at a
             single response timescale, and makes sensory representations
             stable to the presence of variability in the sensory world.
             Thus, as we discuss here, temporal multiplexing could be a
             key strategy used by the brain to form an information-rich
             and stable representation of the environment.},
   Doi = {10.1016/j.tins.2009.12.001},
   Key = {fds328476}
}

@article{fds328462,
   Author = {Brunel, N and Hakim, V and Richardson, MJE},
   Title = {Single neuron dynamics and computation.},
   Journal = {Current Opinion in Neurobiology},
   Volume = {25},
   Pages = {149-155},
   Year = {2014},
   Month = {April},
   url = {http://dx.doi.org/10.1016/j.conb.2014.01.005},
   Abstract = {At the single neuron level, information processing involves
             the transformation of input spike trains into an appropriate
             output spike train. Building upon the classical view of a
             neuron as a threshold device, models have been developed in
             recent years that take into account the diverse
             electrophysiological make-up of neurons and accurately
             describe their input-output relations. Here, we review these
             recent advances and survey the computational roles that they
             have uncovered for various electrophysiological properties,
             for dendritic arbor anatomy as well as for short-term
             synaptic plasticity.},
   Doi = {10.1016/j.conb.2014.01.005},
   Key = {fds328462}
}

@article{fds328527,
   Author = {Brunel, N and Carusi, F and Fusi, S},
   Title = {Slow stochastic Hebbian learning of classes of stimuli in a
             recurrent neural network.},
   Journal = {Network: Computation in Neural Systems (Informa)},
   Volume = {9},
   Number = {1},
   Pages = {123-152},
   Year = {1998},
   Month = {February},
   url = {http://dx.doi.org/10.1088/0954-898x_9_1_007},
   Abstract = {We study unsupervised Hebbian learning in a recurrent
             network in which synapses have a finite number of stable
             states. Stimuli received by the network are drawn at random
             at each presentation from a set of classes. Each class is
             defined as a cluster in stimulus space, centred on the class
             prototype. The presentation protocol is chosen to mimic the
             protocols of visual memory experiments in which a set of
             stimuli is presented repeatedly in a random way. The
             statistics of the input stream may be stationary, or
             changing. Each stimulus induces, in a stochastic way,
             transitions between stable synaptic states. Learning
             dynamics is studied analytically in the slow learning limit,
             in which a given stimulus has to be presented many times
             before it is memorized, i.e. before synaptic modifications
             enable a pattern of activity correlated with the stimulus to
             become an attractor of the recurrent network. We show that
             in this limit the synaptic matrix becomes more correlated
             with the class prototypes than with any of the instances of
             the class. We also show that the number of classes that can
             be learned increases sharply when the coding level
             decreases, and determine the speeds of learning and
             forgetting of classes in the case of changes in the
             statistics of the input stream.},
   Doi = {10.1088/0954-898x_9_1_007},
   Key = {fds328527}
}

@article{fds328489,
   Author = {Brunel, N and Hakim, V},
   Title = {Sparsely synchronized neuronal oscillations.},
   Journal = {Chaos},
   Volume = {18},
   Number = {1},
   Pages = {015113},
   Year = {2008},
   Month = {March},
   url = {http://dx.doi.org/10.1063/1.2779858},
   Abstract = {We discuss here the properties of fast global oscillations
             that emerge in networks of neurons firing irregularly at a
             low rate. We first provide a simple introduction to these
             sparsely synchronized oscillations, then show how they can
             be studied analytically in the simple setting of rate models
             and leaky integrate-and-fire neurons, and finally describe
             how various neurophysiological features can be incorporated
             in this framework. We end by a comparison of experimental
             data and theoretical results.},
   Doi = {10.1063/1.2779858},
   Key = {fds328489}
}

@article{fds328494,
   Author = {Graupner, M and Brunel, N},
   Title = {STDP in a bistable synapse model based on CaMKII and
             associated signaling pathways.},
   Journal = {PLoS computational biology},
   Volume = {3},
   Number = {11},
   Pages = {e221},
   Year = {2007},
   Month = {November},
   url = {http://dx.doi.org/10.1371/journal.pcbi.0030221},
   Abstract = {The calcium/calmodulin-dependent protein kinase II (CaMKII)
             plays a key role in the induction of long-term postsynaptic
             modifications following calcium entry. Experiments suggest
             that these long-term synaptic changes are all-or-none
             switch-like events between discrete states. The biochemical
             network involving CaMKII and its regulating protein
             signaling cascade has been hypothesized to durably maintain
             the evoked synaptic state in the form of a bistable switch.
             However, it is still unclear whether experimental LTP/LTD
             protocols lead to corresponding transitions between the two
             states in realistic models of such a network. We present a
             detailed biochemical model of the CaMKII autophosphorylation
             and the protein signaling cascade governing the CaMKII
             dephosphorylation. As previously shown, two stable states of
             the CaMKII phosphorylation level exist at resting
             intracellular calcium concentration, and high calcium
             transients can switch the system from the weakly
             phosphorylated (DOWN) to the highly phosphorylated (UP)
             state of the CaMKII (similar to a LTP event). We show here
             that increased CaMKII dephosphorylation activity at
             intermediate Ca(2+) concentrations can lead to switching
             from the UP to the DOWN state (similar to a LTD event). This
             can be achieved if protein phosphatase activity promoting
             CaMKII dephosphorylation activates at lower Ca(2+) levels
             than kinase activity. Finally, it is shown that the CaMKII
             system can qualitatively reproduce results of plasticity
             outcomes in response to spike-timing dependent plasticity
             (STDP) and presynaptic stimulation protocols. This shows
             that the CaMKII protein network can account for both
             induction, through LTP/LTD-like transitions, and storage,
             due to its bistability, of synaptic changes.},
   Doi = {10.1371/journal.pcbi.0030221},
   Key = {fds328494}
}

@article{fds328459,
   Author = {Barbieri, F and Mazzoni, A and Logothetis, NK and Panzeri, S and Brunel,
             N},
   Title = {Stimulus dependence of local field potential spectra:
             experiment versus theory.},
   Journal = {The Journal of neuroscience : the official journal of the
             Society for Neuroscience},
   Volume = {34},
   Number = {44},
   Pages = {14589-14605},
   Year = {2014},
   Month = {October},
   url = {http://dx.doi.org/10.1523/jneurosci.5365-13.2014},
   Abstract = {The local field potential (LFP) captures different neural
             processes, including integrative synaptic dynamics that
             cannot be observed by measuring only the spiking activity of
             small populations. Therefore, investigating how LFP power is
             modulated by external stimuli can offer important insights
             into sensory neural representations. However, gaining such
             insight requires developing data-driven computational models
             that can identify and disambiguate the neural contributions
             to the LFP. Here, we investigated how networks of excitatory
             and inhibitory integrate-and-fire neurons responding to
             time-dependent inputs can be used to interpret sensory
             modulations of LFP spectra. We computed analytically from
             such models the LFP spectra and the information that they
             convey about input and used these analytical expressions to
             fit the model to LFPs recorded in V1 of anesthetized
             macaques (Macaca mulatta) during the presentation of color
             movies. Our expressions explain 60%-98% of the variance of
             the LFP spectrum shape and its dependency upon movie scenes
             and we achieved this with realistic values for the best-fit
             parameters. In particular, synaptic best-fit parameters were
             compatible with experimental measurements and the
             predictions of firing rates, based only on the fit of LFP
             data, correlated with the multiunit spike rate recorded from
             the same location. Moreover, the parameters characterizing
             the input to the network across different movie scenes
             correlated with cross-scene changes of several image
             features. Our findings suggest that analytical descriptions
             of spiking neuron networks may become a crucial tool for the
             interpretation of field recordings.},
   Doi = {10.1523/jneurosci.5365-13.2014},
   Key = {fds328459}
}

@article{fds328545,
   Author = {Brunel, N},
   Title = {Storage capacity of neural networks: effect of the
             fluctuations of the number of active neurons per
             memory},
   Journal = {Journal of Physics A: Mathematical and General},
   Volume = {27},
   Number = {14},
   Pages = {4783-4789},
   Year = {1994},
   Month = {July},
   url = {http://dx.doi.org/10.1088/0305-4470/27/14/009},
   Doi = {10.1088/0305-4470/27/14/009},
   Key = {fds328545}
}

@article{fds328469,
   Author = {Clopath, C and Nadal, J-P and Brunel, N},
   Title = {Storage of correlated patterns in standard and bistable
             Purkinje cell models.},
   Journal = {PLoS computational biology},
   Volume = {8},
   Number = {4},
   Pages = {e1002448},
   Year = {2012},
   Month = {January},
   url = {http://dx.doi.org/10.1371/journal.pcbi.1002448},
   Abstract = {The cerebellum has long been considered to undergo
             supervised learning, with climbing fibers acting as a
             'teaching' or 'error' signal. Purkinje cells (PCs), the sole
             output of the cerebellar cortex, have been considered as
             analogs of perceptrons storing input/output associations. In
             support of this hypothesis, a recent study found that the
             distribution of synaptic weights of a perceptron at maximal
             capacity is in striking agreement with experimental data in
             adult rats. However, the calculation was performed using
             random uncorrelated inputs and outputs. This is a clearly
             unrealistic assumption since sensory inputs and motor
             outputs carry a substantial degree of temporal correlations.
             In this paper, we consider a binary output neuron with a
             large number of inputs, which is required to store
             associations between temporally correlated sequences of
             binary inputs and outputs, modelled as Markov chains.
             Storage capacity is found to increase with both input and
             output correlations, and diverges in the limit where both go
             to unity. We also investigate the capacity of a bistable
             output unit, since PCs have been shown to be bistable in
             some experimental conditions. Bistability is shown to
             enhance storage capacity whenever the output correlation is
             stronger than the input correlation. Distribution of
             synaptic weights at maximal capacity is shown to be
             independent on correlations, and is also unaffected by the
             presence of bistability.},
   Doi = {10.1371/journal.pcbi.1002448},
   Key = {fds328469}
}

@article{fds328451,
   Author = {Dubreuil, AM and Brunel, N},
   Title = {Storing structured sparse memories in a multi-modular
             cortical network model.},
   Journal = {Journal of Computational Neuroscience},
   Volume = {40},
   Number = {2},
   Pages = {157-175},
   Year = {2016},
   Month = {April},
   url = {http://dx.doi.org/10.1007/s10827-016-0590-z},
   Abstract = {We study the memory performance of a class of modular
             attractor neural networks, where modules are potentially
             fully-connected networks connected to each other via diluted
             long-range connections. On this anatomical architecture we
             store memory patterns of activity using a Willshaw-type
             learning rule. P patterns are split in categories, such that
             patterns of the same category activate the same set of
             modules. We first compute the maximal storage capacity of
             these networks. We then investigate their error-correction
             properties through an exhaustive exploration of parameter
             space, and identify regions where the networks behave as an
             associative memory device. The crucial parameters that
             control the retrieval abilities of the network are (1) the
             ratio between the number of synaptic contacts of long- and
             short-range origins (2) the number of categories in which a
             module is activated and (3) the amount of local inhibition.
             We discuss the relationship between our model and networks
             of cortical patches that have been observed in different
             cortical areas.},
   Doi = {10.1007/s10827-016-0590-z},
   Key = {fds328451}
}

@article{fds328517,
   Author = {Compte, A and Brunel, N and Goldman-Rakic, PS and Wang,
             XJ},
   Title = {Synaptic mechanisms and network dynamics underlying spatial
             working memory in a cortical network model.},
   Journal = {Cerebral Cortex},
   Volume = {10},
   Number = {9},
   Pages = {910-923},
   Year = {2000},
   Month = {September},
   url = {http://dx.doi.org/10.1093/cercor/10.9.910},
   Abstract = {Single-neuron recordings from behaving primates have
             established a link between working memory processes and
             information-specific neuronal persistent activity in the
             prefrontal cortex. Using a network model endowed with a
             columnar architecture and based on the physiological
             properties of cortical neurons and synapses, we have
             examined the synaptic mechanisms of selective persistent
             activity underlying spatial working memory in the prefrontal
             cortex. Our model reproduces the phenomenology of the
             oculomotor delayed-response experiment of Funahashi et al.
             (S. Funahashi, C.J. Bruce and P.S. Goldman-Rakic, Mnemonic
             coding of visual space in the monkey's dorsolateral
             prefrontal cortex. J Neurophysiol 61:331-349, 1989). To
             observe stable spontaneous and persistent activity, we find
             that recurrent synaptic excitation should be primarily
             mediated by NMDA receptors, and that overall recurrent
             synaptic interactions should be dominated by inhibition.
             Isodirectional tuning of adjacent pyramidal cells and
             interneurons can be accounted for by a structured
             pyramid-to-interneuron connectivity. Robust memory storage
             against random drift of the tuned persistent activity and
             against distractors (intervening stimuli during the delay
             period) may be enhanced by neuromodulation of recurrent
             synapses. Experimentally testable predictions concerning the
             neural basis of working memory are discussed.},
   Doi = {10.1093/cercor/10.9.910},
   Key = {fds328517}
}

@article{fds328480,
   Author = {Ostojic, S and Brunel, N and Hakim, V},
   Title = {Synchronization properties of networks of electrically
             coupled neurons in the presence of noise and
             heterogeneities.},
   Journal = {Journal of Computational Neuroscience},
   Volume = {26},
   Number = {3},
   Pages = {369-392},
   Year = {2009},
   Month = {June},
   url = {http://dx.doi.org/10.1007/s10827-008-0117-3},
   Abstract = {We investigate how synchrony can be generated or induced in
             networks of electrically coupled integrate-and-fire neurons
             subject to noisy and heterogeneous inputs. Using analytical
             tools, we find that in a network under constant external
             inputs, synchrony can appear via a Hopf bifurcation from the
             asynchronous state to an oscillatory state. In a homogeneous
             network, in the oscillatory state all neurons fire in
             synchrony, while in a heterogeneous network synchrony is
             looser, many neurons skipping cycles of the oscillation. If
             the transmission of action potentials via the electrical
             synapses is effectively excitatory, the Hopf bifurcation is
             supercritical, while effectively inhibitory transmission due
             to pronounced hyperpolarization leads to a subcritical
             bifurcation. In the latter case, the network exhibits
             bistability between an asynchronous state and an oscillatory
             state where all the neurons fire in synchrony. Finally we
             show that for time-varying external inputs, electrical
             coupling enhances the synchronization in an asynchronous
             network via a resonance at the firing-rate
             frequency.},
   Doi = {10.1007/s10827-008-0117-3},
   Key = {fds328480}
}

@article{fds328491,
   Author = {Battaglia, D and Brunel, N and Hansel, D},
   Title = {Temporal decorrelation of collective oscillations in neural
             networks with local inhibition and long-range
             excitation.},
   Journal = {Physical Review Letters},
   Volume = {99},
   Number = {23},
   Pages = {238106},
   Year = {2007},
   Month = {December},
   url = {http://dx.doi.org/10.1103/physrevlett.99.238106},
   Abstract = {We consider two neuronal networks coupled by long-range
             excitatory interactions. Oscillations in the gamma frequency
             band are generated within each network by local inhibition.
             When long-range excitation is weak, these oscillations phase
             lock with a phase shift dependent on the strength of local
             inhibition. Increasing the strength of long-range excitation
             induces a transition to chaos via period doubling or
             quasiperiodic scenarios. In the chaotic regime, oscillatory
             activity undergoes fast temporal decorrelation. The
             generality of these dynamical properties is assessed in
             firing-rate models as well as in large networks of
             conductance-based neurons.},
   Doi = {10.1103/physrevlett.99.238106},
   Key = {fds328491}
}

@article{fds328486,
   Author = {Roxin, A and Hakim, V and Brunel, N},
   Title = {The statistics of repeating patterns of cortical activity
             can be reproduced by a model network of stochastic binary
             neurons.},
   Journal = {The Journal of neuroscience : the official journal of the
             Society for Neuroscience},
   Volume = {28},
   Number = {42},
   Pages = {10734--10745},
   Year = {2008},
   Month = {October},
   url = {http://dx.doi.org/10.1523/jneurosci.1016-08.2008},
   Abstract = {Calcium imaging of the spontaneous activity in cortical
             slices has revealed repeating spatiotemporal patterns of
             transitions between so-called down states and up states
             (Ikegaya et al., 2004). Here we fit a model network of
             stochastic binary neurons to data from these experiments,
             and in doing so reproduce the distributions of such
             patterns. We use two versions of this model: (1) an
             unconnected network in which neurons are activated as
             independent Poisson processes; and (2) a network with an
             interaction matrix, estimated from the data, representing
             effective interactions between the neurons. The unconnected
             model (model 1) is sufficient to account for the statistics
             of repeating patterns in 11 of the 15 datasets studied.
             Model 2, with interactions between neurons, is required to
             account for pattern statistics of the remaining four. Three
             of these four datasets are the ones that contain the largest
             number of transitions, suggesting that long datasets are in
             general necessary to render interactions statistically
             visible. We then study the topology of the matrix of
             interactions estimated for these four datasets. For three of
             the four datasets, we find sparse matrices with long-tailed
             degree distributions and an overrepresentation of certain
             network motifs. The remaining dataset exhibits a strongly
             interconnected, spatially localized subgroup of neurons. In
             all cases, we find that interactions between neurons
             facilitate the generation of long patterns that do not
             repeat exactly.},
   Doi = {10.1523/jneurosci.1016-08.2008},
   Key = {fds328486}
}

@article{fds328536,
   Author = {Ninio, J and Brunel, N},
   Title = {Time to detect a single difference between two correlated
             images},
   Journal = {Perception},
   Volume = {25},
   Pages = {89--89},
   Year = {1996},
   Key = {fds328536}
}

@article{fds328531,
   Author = {Brunel, N and Ninio, J},
   Title = {Time to detect the difference between two images presented
             side by side.},
   Journal = {Cognitive Brain Research},
   Volume = {5},
   Number = {4},
   Pages = {273--282},
   Year = {1997},
   Month = {June},
   url = {http://dx.doi.org/10.1016/s0926-6410(97)00003-7},
   Abstract = {The time to locate a difference between two artificial
             images presented side by side on a CRT screen was studied as
             a function of their complexity. The images were square
             lattices of black or white squares or quadrangles, in some
             cases delineated by a blue grid. Each pair differed at a
             single position, chosen at random. For images of size N x N,
             the median reaction time varied as cN2, from N = 3-15, with
             c being around 50 ms in the absence of grid (i.e., when the
             quadrangles were associated into continuous shapes). For N <
             or = 9, when the lattice was made irregular, performance did
             not deteriorate, up to a rather high level of irregularity.
             Furthermore, the presence of uncorrelated distortions in the
             left and right images did not affect performance for N < or
             = 6. In the presence of a grid, the reaction times were on
             average higher by 20%. The results taken together indicate
             that the detection of differences does not proceed on a
             point-by-point basis and must be mediated by some abstract
             shape analysis, in agreement with current views on
             short-term visual memory (e.g., Phillips, W.A., On the
             distinction between sensory storage and short-term visual
             memory, Percept. Psychophys., 16 (1974) 283-290 [13]). In
             complementary experiments, the subjects had to judge whether
             two images presented side by side were the same or
             different, with N varying from 1 to 5. For N < 3, the same
             and the different responses were similar in all their
             statistical aspects. For N > or = 4, the "same" responses
             took a significantly larger time than the "different"
             responses and were accompanied by a significant increase in
             errors. The qualitative change from N = 3 to N = 4 is
             interpreted as a shift from a "single inspection" analysis
             to an obligatory scanning procedure. On the whole, we
             suggest that visual information in our simultaneous
             comparison task is extracted by chunks of about 12 +/- 3
             bits, and that the visual processing and matching tasks take
             about 50 ms per couple of quadrangles. In Section 4, we
             compare these values to the values obtained through other
             experimental paradigms.},
   Doi = {10.1016/s0926-6410(97)00003-7},
   Key = {fds328531}
}

@article{fds328447,
   Author = {Titley, HK and Brunel, N and Hansel, C},
   Title = {Toward a Neurocentric View of Learning.},
   Journal = {Neuron},
   Volume = {95},
   Number = {1},
   Pages = {19--32},
   Year = {2017},
   Month = {July},
   url = {http://dx.doi.org/10.1016/j.neuron.2017.05.021},
   Abstract = {Synaptic plasticity (e.g., long-term potentiation [LTP]) is
             considered the cellular correlate of learning. Recent
             optogenetic studies on memory engram formation assign a
             critical role in learning to suprathreshold activation of
             neurons and their integration into active engrams ("engram
             cells"). Here we review evidence that ensemble integration
             may result from LTP but also from cell-autonomous changes in
             membrane excitability. We propose that synaptic plasticity
             determines synaptic connectivity maps, whereas intrinsic
             plasticity-possibly separated in time-amplifies neuronal
             responsiveness and acutely drives engram integration. Our
             proposal marks a move away from an exclusively
             synaptocentric toward a non-exclusive, neurocentric view of
             learning.},
   Doi = {10.1016/j.neuron.2017.05.021},
   Key = {fds328447}
}

@article{fds328475,
   Author = {Mazzoni, A and Whittingstall, K and Brunel, N and Logothetis, NK and Panzeri, S},
   Title = {Understanding the relationships between spike rate and
             delta/gamma frequency bands of {LFPs} and {EEGs} using a local
             cortical network model.},
   Journal = {NeuroImage},
   Volume = {52},
   Number = {3},
   Pages = {956--972},
   Year = {2010},
   Month = {September},
   url = {http://dx.doi.org/10.1016/j.neuroimage.2009.12.040},
   Abstract = {Despite the widespread use of EEGs to measure the
             large-scale dynamics of the human brain, little is known on
             how the dynamics of EEGs relates to that of the underlying
             spike rates of cortical neurons. However, progress was made
             by recent neurophysiological experiments reporting that EEG
             delta-band phase and gamma-band amplitude reliably predict
             some complementary aspects of the time course of spikes of
             visual cortical neurons. To elucidate the mechanisms behind
             these findings, here we hypothesize that the EEG delta phase
             reflects shifts of local cortical excitability arising from
             slow fluctuations in the network input due to entrainment to
             sensory stimuli or to fluctuations in ongoing activity, and
             that the resulting local excitability fluctuations modulate
             both the spike rate and the engagement of
             excitatory-inhibitory loops producing gamma-band
             oscillations. We quantitatively tested these hypotheses by
             simulating a recurrent network of excitatory and inhibitory
             neurons stimulated with dynamic inputs presenting temporal
             regularities similar to that of thalamic responses during
             naturalistic visual stimulation and during spontaneous
             activity. The network model reproduced in detail the
             experimental relationships between spike rate and EEGs, and
             suggested that the complementariness of the prediction of
             spike rates obtained from EEG delta phase or gamma amplitude
             arises from nonlinearities in the engagement of
             excitatory-inhibitory loops and from temporal modulations in
             the amplitude of the network input, which respectively limit
             the predictability of spike rates from gamma amplitude or
             delta phase alone. The model suggested also ways to improve
             and extend current algorithms for online prediction of spike
             rates from EEGs.},
   Doi = {10.1016/j.neuroimage.2009.12.040},
   Key = {fds328475}
}

@article{fds328481,
  author   = {Zillmer, R and Brunel, N and Hansel, D},
  title    = {Very long transients, irregular firing, and chaotic dynamics
             in networks of randomly connected inhibitory
             integrate-and-fire neurons.},
  journal  = {Physical Review E - Statistical, Nonlinear, and Soft Matter
             Physics},
  volume   = {79},
  number   = {3 Pt 1},
  pages    = {031909},
  year     = {2009},
  month    = {March},
  url      = {http://dx.doi.org/10.1103/physreve.79.031909},
  abstract = {We present results of an extensive numerical study of the
             dynamics of networks of integrate-and-fire neurons connected
             randomly through inhibitory interactions. We first consider
             delayed interactions with infinitely fast rise and decay.
             Depending on the parameters, the network displays transients
             which are short or exponentially long in the network size.
             At the end of these transients, the dynamics settle on a
             periodic attractor. If the number of connections per neuron
             is large ( approximately 1000) , this attractor is a cluster
             state with a short period. In contrast, if the number of
             connections per neuron is small ( approximately 100) , the
             attractor has complex dynamics and very long period. During
             the long transients the neurons fire in a highly irregular
             manner. They can be viewed as quasistationary states in
             which, depending on the coupling strength, the pattern of
             activity is asynchronous or displays population
             oscillations. In the first case, the average firing rates
             and the variability of the single-neuron activity are well
             described by a mean-field theory valid in the thermodynamic
             limit. Bifurcations of the long transient dynamics from
             asynchronous to synchronous activity are also well predicted
             by this theory. The transient dynamics display features
             reminiscent of stable chaos. In particular, despite being
             linearly stable, the trajectories of the transient dynamics
             are destabilized by finite perturbations as small as O(1/N)
             . We further show that stable chaos is also observed for
             postsynaptic currents with finite decay time. However, we
             report in this type of network that chaotic dynamics
             characterized by positive Lyapunov exponents can also be
             observed. We show in fact that chaos occurs when the decay
             time of the synaptic currents is long compared to the
             synaptic delay, provided that the network is sufficiently
             large.},
  doi      = {10.1103/physreve.79.031909},
  key      = {fds328481}
}

@article{fds328492,
   Author = {Barbour, B and Brunel, N and Hakim, V and Nadal, J-P},
   Title = {What can we learn from synaptic weight distributions?},
   Journal = {Trends in Neurosciences},
   Volume = {30},
   Number = {12},
   Pages = {622--629},
   Year = {2007},
   Month = {December},
   url = {http://dx.doi.org/10.1016/j.tins.2007.09.005},
   Abstract = {Much research effort into synaptic plasticity has been
             motivated by the idea that modifications of synaptic weights
             (or strengths or efficacies) underlie learning and memory.
             Here, we examine the possibility of exploiting the
             statistics of experimentally measured synaptic weights to
             deduce information about the learning process. Analysing
             distributions of synaptic weights requires a theoretical
             framework to interpret the experimental measurements, but
             the results can be unexpectedly powerful, yielding strong
             constraints on possible learning theories as well as
             information that is difficult to obtain by other means, such
             as the information storage capacity of a cell. We review the
             available experimental and theoretical techniques as well as
             important open issues.},
   Doi = {10.1016/j.tins.2007.09.005},
   Key = {fds328492}
}

@article{fds328510,
   Author = {Brunel, N and Wang, X-J},
   Title = {What determines the frequency of fast network oscillations
             with irregular neural discharges? {I}. Synaptic dynamics and
             excitation-inhibition balance.},
   Journal = {Journal of neurophysiology},
   Volume = {90},
   Number = {1},
   Pages = {415--430},
   Year = {2003},
   Month = {July},
   url = {http://dx.doi.org/10.1152/jn.01095.2002},
   Abstract = {When the local field potential of a cortical network
             displays coherent fast oscillations ( approximately 40-Hz
             gamma or approximately 200-Hz sharp-wave ripples), the spike
             trains of constituent neurons are typically irregular and
             sparse. The dichotomy between rhythmic local field and
             stochastic spike trains presents a challenge to the theory
             of brain rhythms in the framework of coupled oscillators.
             Previous studies have shown that when noise is large and
             recurrent inhibition is strong, a coherent network rhythm
             can be generated while single neurons fire intermittently at
             low rates compared to the frequency of the oscillation.
             However, these studies used too simplified synaptic kinetics
             to allow quantitative predictions of the population rhythmic
             frequency. Here we show how to derive quantitatively the
             coherent oscillation frequency for a randomly connected
             network of leaky integrate-and-fire neurons with realistic
             synaptic parameters. In a noise-dominated interneuronal
             network, the oscillation frequency depends much more on the
             shortest synaptic time constants (delay and rise time) than
             on the longer synaptic decay time, and approximately 200-Hz
             frequency can be realized with synaptic time constants taken
             from slice data. In a network composed of both interneurons
             and excitatory cells, the rhythmogenesis is a compromise
             between two scenarios: the fast purely interneuronal
             mechanism, and the slower feedback mechanism (relying on the
             excitatory-inhibitory loop). The properties of the rhythm
             are determined essentially by the ratio of time scales of
             excitatory and inhibitory currents and by the balance
             between the mean recurrent excitation and inhibition. Faster
             excitation than inhibition, or a higher excitation/inhibition
             ratio, favors the feedback loop and a much slower
             oscillation (typically in the gamma range).},
   Doi = {10.1152/jn.01095.2002},
   Key = {fds328510}
}