Psychology and Neuroscience Faculty Database
Arts & Sciences, Duke University

Publications of Elika Bergelson

%% Journal Articles   
@article{fds376037,
   Author = {Moore, C and Bergelson, E},
   Title = {Wordform variability in infants' language environment and
             its effects on early word learning.},
   Journal = {Cognition},
   Volume = {245},
   Pages = {105694},
   Year = {2024},
   Month = {April},
   url = {http://dx.doi.org/10.1016/j.cognition.2023.105694},
   Abstract = {Most research regarding early word learning in English tends
             to make the simplifying assumption that there exists a
             one-to-one mapping between concrete objects and their
             labels. In the current work, we provide evidence that runs
             counter to this assumption, aligning English with more
             morphologically-rich languages. We suggest that even in a
             morphologically-poor language like English, real world
             language input to infants does not provide tidy 1-to-1
             mappings. Instead, infants encounter many variant wordforms
             for familiar nouns (e.g. dog∼doggy∼dogs). We explore
             this wordform variability in 44 English-learning infants'
             naturalistic environments using a longitudinal corpus of
             infant-available speech. We look at both the frequency and
             composition of wordform variability. We find two broad
             categories of variability: referent-changing alterations,
             where words were pluralized or compounded (e.g.
             coat∼raincoats); and wordplay, where words changed form
             without a notable change in referent (e.g. bird∼birdie).
             We further find that wordplay occurs with a limited number
             of lemmas that are usually early-learned, high-frequency,
             and shorter. When looking at all wordform variability, we
             find that individual words with higher levels of wordform
             variability are learned earlier than words with fewer
             wordforms, over and above the effect of frequency.},
   Doi = {10.1016/j.cognition.2023.105694},
   Key = {fds376037}
}

@article{fds375235,
   Author = {Laing, C and Bergelson, E},
   Title = {Analyzing the effect of sibling number on input and output
             in the first 18 months.},
   Journal = {Infancy: the official journal of the International Society
              on Infant Studies},
   Volume = {29},
   Number = {2},
   Pages = {175-195},
   Publisher = {WILEY},
   Year = {2024},
   Month = {March},
   url = {http://dx.doi.org/10.1111/infa.12578},
   Abstract = {Prior research suggests that across a wide range of
             cognitive, educational, and health-based measures,
             first-born children outperform their later-born peers.
             Expanding on this literature using naturalistic
             home-recorded data and parental vocabulary reports, we find
             that early language outcomes vary by number of siblings in a
             sample of 43 English-learning U.S. children from mid-to-high
             socioeconomic status homes. More specifically, we find that
              children in our sample with two or more (but not one) older
              siblings had smaller productive vocabularies at 18 months,
              and heard less input from caregivers across several measures
              than their peers with fewer than two older siblings. We discuss
             implications regarding what infants experience and learn
             across a range of family sizes in infancy.},
   Doi = {10.1111/infa.12578},
   Key = {fds375235}
}

@article{fds375505,
   Author = {Campbell, E and Casillas, R and Bergelson, E},
   Title = {The role of vision in the acquisition of words: Vocabulary
             development in blind toddlers.},
   Journal = {Developmental science},
   Pages = {e13475},
   Year = {2024},
   Month = {January},
   url = {http://dx.doi.org/10.1111/desc.13475},
   Abstract = {What is vision's role in driving early word production? To
             answer this, we assessed parent-report vocabulary
             questionnaires administered to congenitally blind children
             (N = 40, Mean age = 24 months [R: 7-57 months]) and
             compared the size and contents of their productive
             vocabulary to those of a large normative sample of sighted
             children (N = 6574). We found that on average, blind
             children showed a roughly half-year vocabulary delay
             relative to sighted children, amid considerable variability.
             However, the content of blind and sighted children's
             vocabulary was statistically indistinguishable in word
             length, part of speech, semantic category, concreteness,
             interactiveness, and perceptual modality. At a finer-grained
             level, we also found that words' perceptual properties
             intersect with children's perceptual abilities. Our findings
             suggest that while an absence of visual input may initially
             make vocabulary development more difficult, the content of
             the early productive vocabulary is largely resilient to
             differences in perceptual access. RESEARCH HIGHLIGHTS:
             Infants and toddlers born blind (with no other diagnoses)
              show a 7.5-month productive vocabulary delay on average,
             with wide variability. Across the studied age range
             (7-57 months), vocabulary delays widened with age. Blind
             and sighted children's early vocabularies contain similar
             distributions of word lengths, parts of speech, semantic
             categories, and perceptual modalities. Blind children (but
             not sighted children) were more likely to say visual words
             which could also be experienced through other
             senses.},
   Doi = {10.1111/desc.13475},
   Key = {fds375505}
}

@article{fds373687,
   Author = {Meylan, SC and Foushee, R and Wong, NH and Bergelson, E and Levy,
             RP},
   Title = {How adults understand what young children
             say.},
   Journal = {Nature human behaviour},
   Volume = {7},
   Number = {12},
   Pages = {2111-2125},
   Year = {2023},
   Month = {December},
   url = {http://dx.doi.org/10.1038/s41562-023-01698-3},
   Abstract = {Children's early speech often bears little resemblance to
             that of adults, and yet parents and other caregivers are
             able to interpret that speech and react accordingly. Here we
             investigate how adult listeners' inferences reflect
             sophisticated beliefs about what children are trying to
             communicate, as well as how children are likely to pronounce
             words. Using a Bayesian framework for modelling spoken word
             recognition, we find that computational models can replicate
             adult interpretations of children's speech only when they
             include strong, context-specific prior expectations about
             the messages that children will want to communicate. This
             points to a critical role of adult cognitive processes in
             supporting early communication and reveals how children can
             actively prompt adults to take actions on their behalf even
             when they have only a nascent understanding of the adult
             language. We discuss the wide-ranging implications of the
             powerful listening capabilities of adults for theories of
             first language acquisition.},
   Doi = {10.1038/s41562-023-01698-3},
   Key = {fds373687}
}
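
The Bayesian approach this abstract describes amounts to scoring each candidate word by P(word | acoustics) ∝ P(acoustics | word) × P(word), with the prior P(word) fitted to child-directed contexts rather than to adult-adult language. Below is a minimal Python sketch of that idea, assuming a toy three-word lexicon, an edit-distance likelihood, and invented priors; it illustrates the general framework only, not the authors' released models or data.

# Toy Bayesian spoken-word recognition with swappable priors.
# The lexicon, priors, and edit-distance likelihood are invented for
# illustration; they are not the models evaluated in the paper.
import math

def edit_distance(a, b):
    """Standard Levenshtein distance via dynamic programming."""
    m, n = len(a), len(b)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        dp[i][0] = i
    for j in range(n + 1):
        dp[0][j] = j
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if a[i - 1] == b[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1,
                           dp[i - 1][j - 1] + cost)
    return dp[m][n]

def likelihood(heard, word):
    """Toy acoustic likelihood: decays with phonetic (string) distance."""
    return math.exp(-edit_distance(heard, word))

def recognize(heard, prior):
    """Posterior over the lexicon: P(w | heard) ∝ P(heard | w) P(w)."""
    scores = {w: likelihood(heard, w) * p for w, p in prior.items()}
    total = sum(scores.values())
    return {w: s / total for w, s in scores.items()}

adult_prior = {"fish": 0.2, "this": 0.5, "kiss": 0.3}  # adult-adult talk
child_prior = {"fish": 0.6, "this": 0.1, "kiss": 0.3}  # child-directed talk

print(recognize("fis", adult_prior))  # competitors remain plausible
print(recognize("fis", child_prior))  # child-specific prior recovers "fish"

The model comparison in the paper is then a matter of which prior best predicts adults' actual interpretations; the sketch only shows how the prior reweights the recovered meaning.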

@article{fds374572,
   Author = {Bergelson, E and Soderstrom, M and Schwarz, I-C and Rowland, CF and Ramírez-Esparza, N and Hamrick, LR and Marklund, E and Kalashnikova, M and Guez, A and Casillas, M and Benetti, L and van Alphen,
              P and Cristia, A},
   Title = {Everyday language input and production in 1,001 children
             from six continents.},
   Journal = {Proceedings of the National Academy of Sciences of the
             United States of America},
   Volume = {120},
   Number = {52},
   Pages = {e2300671120},
   Year = {2023},
   Month = {December},
   url = {http://dx.doi.org/10.1073/pnas.2300671120},
   Abstract = {Language is a universal human ability, acquired readily by
             young children, who otherwise struggle with many basics of
             survival. And yet, language ability is variable across
             individuals. Naturalistic and experimental observations
             suggest that children's linguistic skills vary with factors
             like socioeconomic status and children's gender. But which
             factors really influence children's day-to-day language use?
             Here, we leverage speech technology in a big-data approach
             to report on a unique cross-cultural and diverse data set:
              >2,500 day-long, child-centered audio-recordings of 1,001 2-
             to 48-mo-olds from 12 countries spanning six continents
             across urban, farmer-forager, and subsistence-farming
             contexts. As expected, age and language-relevant clinical
             risks and diagnoses predicted how much speech (and
             speech-like vocalization) children produced. Critically, so
             too did adult talk in children's environments: Children who
             heard more talk from adults produced more speech. In
             contrast to previous conclusions based on more limited
             sampling methods and a different set of language proxies,
             socioeconomic status (operationalized as maternal education)
             was not significantly associated with children's productions
             over the first 4 y of life, and neither were gender or
             multilingualism. These findings from large-scale
             naturalistic data advance our understanding of which factors
             are robust predictors of variability in the speech behaviors
             of young learners in a wide range of everyday
             contexts.},
   Doi = {10.1073/pnas.2300671120},
   Key = {fds374572}
}

@article{fds370372,
   Author = {Liu, J and Hilton, CB and Bergelson, E and Mehr, SA},
   Title = {Language experience predicts music processing in a
             half-million speakers of fifty-four languages.},
   Journal = {Current biology: CB},
   Volume = {33},
   Number = {10},
   Pages = {1916-1925.e4},
   Publisher = {Elsevier BV},
   Year = {2023},
   Month = {May},
   url = {http://dx.doi.org/10.1016/j.cub.2023.03.067},
   Abstract = {Tonal languages differ from other languages in their use of
             pitch (tones) to distinguish words. Lifelong experience
             speaking and hearing tonal languages has been argued to
             shape auditory processing in ways that generalize beyond the
             perception of linguistic pitch to the perception of pitch in
             other domains like music. We conducted a meta-analysis of
             prior studies testing this idea, finding moderate evidence
             supporting it. But prior studies were limited by mostly
             small sample sizes representing a small number of languages
             and countries, making it challenging to disentangle the
             effects of linguistic experience from variability in music
             training, cultural differences, and other potential
             confounds. To address these issues, we used web-based
             citizen science to assess music perception skill on a global
             scale in 34,034 native speakers of 19 tonal languages (e.g.,
             Mandarin, Yoruba). We compared their performance to 459,066
             native speakers of other languages, including 6
             pitch-accented (e.g., Japanese) and 29 non-tonal languages
             (e.g., Hungarian). Whether or not participants had taken
             music lessons, native speakers of all 19 tonal languages had
             an improved ability to discriminate musical melodies on
             average, relative to speakers of non-tonal languages. But
             this improvement came with a trade-off: tonal language
             speakers were also worse at processing the musical beat. The
             results, which held across native speakers of many diverse
             languages and were robust to geographic and demographic
             variation, demonstrate that linguistic experience shapes
             music perception, with implications for relations between
             music, language, and culture in the human
             mind.},
   Doi = {10.1016/j.cub.2023.03.067},
   Key = {fds370372}
}

@article{fds367924,
   Author = {Bulgarelli, F and Bergelson, E},
   Title = {Talker variability is not always the right noise:
              14-month-olds struggle to learn dissimilar word-object pairs
              under talker variability conditions.},
   Journal = {Journal of experimental child psychology},
   Volume = {227},
   Pages = {105575},
   Year = {2023},
   Month = {March},
   url = {http://dx.doi.org/10.1016/j.jecp.2022.105575},
   Abstract = {Seminal work by Stager & Werker (1997) finds that
             14-month-olds can rapidly learn two word-object pairings if
             the words are distinct (e.g. "neem" and "lif") but not
             similar (e.g. the minimal pair "bih" and "dih"). More
             recently, studies have found that adding talker variability
             during exposure to new word-object pairs lets 14-month-olds
             succeed on the more challenging minimal pair task,
             presumably due to talker variability highlighting the
             "relevant" consistencies between the similar words (Rost &
              McMurray, 2009; Galle et al., 2015; Höhle et al., 2020). It
             remains an open question, however, whether talker
             variability would be similarly useful for learning new
             word-object pairings when the words themselves are already
             distinct, or whether instead this extra variability may
             extinguish learning due to increased task demands. We find
             evidence for the latter. Namely, in our sample of 54
             English-learning 14-month-olds, training infants on two
             word-object pairings (e.g. "neem" with a dog toy and "lof"
             with a kitchen tool) only led them to notice when the words
             and objects were switched if they were trained with
             single-speaker identical word tokens. When the training
             featured talker variability (from one or multiple talkers)
             infants failed to learn the pairings. We suggest that when
             talker variability is not necessary to highlight the
             invariant differences between similar words, it may actually
             increase task difficulty, making it harder for infants to
             determine what to attend to in the earliest phases of word
             learning.},
   Doi = {10.1016/j.jecp.2022.105575},
   Key = {fds367924}
}

@article{fds376090,
   Author = {Lavechin, M and Metais, M and Titeux, H and Boissonnet, A and Copet, J and Riviere, M and Bergelson, E and Cristia, A and Dupoux, E and Bredin,
             H},
   Title = {Brouhaha: Multi-Task Training for Voice Activity Detection,
             Speech-to-Noise Ratio, and C50 Room Acoustics
             Estimation},
   Journal = {2023 IEEE Automatic Speech Recognition and Understanding
             Workshop, ASRU 2023},
   Year = {2023},
   Month = {January},
   ISBN = {9798350306897},
   url = {http://dx.doi.org/10.1109/ASRU57964.2023.10389718},
   Abstract = {Most automatic speech processing systems register degraded
             performance when applied to noisy or reverberant speech. But
             how can one tell whether speech is noisy or reverberant? We
             propose Brouhaha, a neural network jointly trained to
             extract speech/non-speech segments, speech-to-noise ratios,
             and C50 room acoustics from single-channel recordings.
             Brouhaha is trained using a data-driven approach in which
             noisy and reverberant audio segments are synthesized. We
             first evaluate its performance and demonstrate that the
             proposed multi-task regime is beneficial. We then present
             two scenarios illustrating how Brouhaha can be used on
             naturally noisy and reverberant data: 1) to investigate the
             errors made by a speaker diarization model (pyannote.audio);
             and 2) to assess the reliability of an automatic speech
             recognition model (Whisper from OpenAI). Both our pipeline
             and a pretrained model are open source and shared with the
             speech community.},
   Doi = {10.1109/ASRU57964.2023.10389718},
   Key = {fds376090}
}
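
The multi-task setup the abstract describes pairs one shared encoder with three prediction heads: frame-level voice activity, speech-to-noise ratio, and C50. A minimal PyTorch sketch of that training objective follows; the layer sizes, loss weights, and masking choice are assumptions made for illustration, not the released Brouhaha architecture or configuration.

# Illustrative shared-encoder, three-head model in the spirit of the
# Brouhaha objective (VAD + SNR + C50). Sizes and weights are assumptions.
import torch
import torch.nn as nn

class MultiTaskSpeechModel(nn.Module):
    def __init__(self, n_mels=64, hidden=128):
        super().__init__()
        self.encoder = nn.LSTM(n_mels, hidden, batch_first=True)
        self.vad_head = nn.Linear(hidden, 1)  # speech/non-speech per frame
        self.snr_head = nn.Linear(hidden, 1)  # speech-to-noise ratio (dB)
        self.c50_head = nn.Linear(hidden, 1)  # C50 room acoustics (dB)

    def forward(self, mels):
        h, _ = self.encoder(mels)             # (batch, frames, hidden)
        return (self.vad_head(h).squeeze(-1),
                self.snr_head(h).squeeze(-1),
                self.c50_head(h).squeeze(-1))

def multitask_loss(model, mels, vad_y, snr_y, c50_y, w=(1.0, 0.1, 0.1)):
    vad_p, snr_p, c50_p = model(mels)
    l_vad = nn.functional.binary_cross_entropy_with_logits(vad_p, vad_y)
    mask = vad_y.bool()                       # SNR/C50 only defined on speech
    l_snr = nn.functional.mse_loss(snr_p[mask], snr_y[mask])
    l_c50 = nn.functional.mse_loss(c50_p[mask], c50_y[mask])
    return w[0] * l_vad + w[1] * l_snr + w[2] * l_c50

# Synthetic batch: 2 recordings, 50 frames, 64 mel bins.
model = MultiTaskSpeechModel()
mels = torch.randn(2, 50, 64)
vad_y = torch.randint(0, 2, (2, 50)).float()
snr_y, c50_y = torch.randn(2, 50) * 10.0, torch.randn(2, 50) * 5.0
multitask_loss(model, mels, vad_y, snr_y, c50_y).backward()

Training on synthesized noisy and reverberant audio, as in the paper, then reduces to generating (mels, vad_y, snr_y, c50_y) tuples from clean speech plus simulated noise and room impulse responses.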

@article{fds364057,
   Author = {Moore, C and Bergelson, E},
   Title = {Examining the roles of regularity and lexical class in
             18–26-month-olds’ representations of how words
             sound},
   Journal = {Journal of Memory and Language},
   Volume = {126},
   Year = {2022},
   Month = {October},
   url = {http://dx.doi.org/10.1016/j.jml.2022.104337},
   Abstract = {By around 12 months, infants have well-specified phonetic
             representations for the nouns they understand, for instance
             looking less at a car upon hearing ‘cur’ than ‘car’
             (Swingley and Aslin, 2002). Here we test whether such
             high-fidelity representations extend to irregular nouns, and
             regular and irregular verbs. A corpus analysis confirms the
             intuition that irregular verbs are far more common than
             irregular nouns in speech to young children. Two eyetracking
             experiments then test whether toddlers are sensitive to
             mispronunciation in regular and irregular nouns (Experiment
             1) and verbs (Experiment 2). For nouns, we find a
             mispronunciation effect and no regularity effect in
             18-month-olds. For verbs, in Experiment 2a, we find only a
             regularity effect and no mispronunciation effect in
             18-month-olds, though toddlers’ poor comprehension overall
             limits interpretation. Finally, in Experiment 2b we find a
             mispronunciation effect and no regularity effect in
             26-month-olds. The interlocking roles of lexical class and
             regularity for wordform representations and early word
             learning are discussed.},
   Doi = {10.1016/j.jml.2022.104337},
   Key = {fds364057}
}

@article{fds364058,
   Author = {Campbell, EE and Bergelson, E},
   Title = {Making sense of sensory language: Acquisition of sensory
             knowledge by individuals with congenital sensory
             impairments.},
   Journal = {Neuropsychologia},
   Volume = {174},
   Pages = {108320},
   Publisher = {Elsevier BV},
   Year = {2022},
   Month = {September},
   url = {http://dx.doi.org/10.1016/j.neuropsychologia.2022.108320},
   Abstract = {The present article provides a narrative review on how
             language communicates sensory information and how knowledge
             of sight and sound develops in individuals born deaf or
             blind. Studying knowledge of the perceptually inaccessible
             sensory domain for these populations offers a lens into how
             humans learn about that which they cannot perceive. We first
             review the linguistic strategies within language that
             communicate sensory information. Highlighting the power of
             language to shape knowledge, we next review the detailed
             knowledge of sensory information by individuals with
             congenital sensory impairments, limitations therein, and
             neural representations of imperceptible phenomena. We
             suggest that the acquisition of sensory knowledge is
             supported by language, experience with multiple perceptual
             domains, and cognitive and social abilities which mature
             over the first years of life, both in individuals with and
             without sensory impairment. We conclude by proposing a
             developmental trajectory for acquiring sensory knowledge in
             the absence of sensory perception.},
   Doi = {10.1016/j.neuropsychologia.2022.108320},
   Key = {fds364058}
}

@article{fds360576,
   Author = {Dailey, S and Bergelson, E},
   Title = {Language input to infants of different socioeconomic
             statuses: A quantitative meta-analysis.},
   Journal = {Developmental science},
   Volume = {25},
   Number = {3},
   Pages = {e13192},
   Year = {2022},
   Month = {May},
   url = {http://dx.doi.org/10.1111/desc.13192},
   Abstract = {For the past 25 years, researchers have investigated
             language input to children from high- and low-socioeconomic
             status (SES) families. Hart and Risley first reported a "30
             Million Word Gap" between high-SES and low-SES children.
             More recent studies have challenged the size or even
             existence of this gap. The present study is a quantitative
             meta-analysis on socioeconomic differences in language input
             to young children, which aims to systematically integrate
             decades of research on this topic. We analyzed 19 studies
             and found a significant effect of SES on language input
             quantity. However, this effect was moderated by the type of
             language included in language quantity measures: studies
             that include only child-directed speech in their language
             measures find a large SES difference, while studies that
             include all speech in a child's environment find no effect
             of SES. These results support recent work suggesting that
             methodological decisions can affect researchers' estimates
             of the "word gap." Overall, we find that young children from
             low-SES homes heard less child-directed speech than children
             from mid- to high-SES homes, though this difference was much
             smaller than Hart & Risley's "30 Million Word Gap." Finally,
             we underscore the need for more cross-cultural work on
             language development and the forces that may contribute to
             it, highlighting the opportunity for better integration of
             observational, experimental, and intervention-based approaches.},
   Doi = {10.1111/desc.13192},
   Key = {fds360576}
}
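
The pooling step in a quantitative meta-analysis like this one is an inverse-variance weighted average of per-study effect sizes, optionally split by a moderator (here, whether studies measured only child-directed speech or all speech in the environment). A minimal fixed-effect sketch in Python; the effect sizes and variances are invented, not values from the 19 analyzed studies.

# Minimal inverse-variance (fixed-effect) pooling of effect sizes.
# The numbers are invented for illustration, not the paper's data.
import math

studies = [
    # (standardized effect size d, variance of d)
    (0.45, 0.02),
    (0.10, 0.05),
    (0.30, 0.01),
]

weights = [1.0 / var for _, var in studies]
pooled = sum(w * d for (d, _), w in zip(studies, weights)) / sum(weights)
se = math.sqrt(1.0 / sum(weights))
low, high = pooled - 1.96 * se, pooled + 1.96 * se
print(f"pooled d = {pooled:.2f}, 95% CI [{low:.2f}, {high:.2f}]")

A moderator analysis then pools the child-directed-speech studies and the all-speech studies separately and asks whether the two pooled estimates differ; random-effects models additionally estimate between-study variance before weighting.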

@article{fds362823,
   Author = {Campbell, E and Bergelson, E},
   Title = {Characterizing North Carolina's Deaf and Hard of Hearing
             Infants and Toddlers: Predictors of Vocabulary, Diagnosis,
             and Intervention.},
   Journal = {Journal of speech, language, and hearing research:
              JSLHR},
   Volume = {65},
   Number = {5},
   Pages = {1894-1905},
   Year = {2022},
   Month = {May},
   url = {http://dx.doi.org/10.1044/2022_jslhr-21-00245},
   Abstract = {Purpose: This study sought to (a) characterize the
              demographic, audiological, and intervention variability in a
              population of Deaf and Hard of Hearing (DHH) children
              receiving state services for hearing loss; (b) identify
              predictors of vocabulary delays; and (c) evaluate factors
              influencing the success and timing of early identification
              and intervention efforts at a state level. Method: One
              hundred DHH infants and toddlers (aged 4-36 months) enrolled
              in early intervention completed the MacArthur-Bates
              Communicative Development Inventories, and detailed
              information about their audiological and clinical history
              was collected. We examined the influence of demographic,
              clinical, and audiological factors on vocabulary outcomes
              and early intervention efforts. Results: We found that
              this sample showed spoken language vocabulary delays
              (production) relative to hearing peers and showed room for
              improvement in rates of early diagnosis and intervention.
              These delays in vocabulary and early support services were
              predicted by an overlapping subset of hearing-, health-, and
              home-related variables. Conclusions: In a diverse
              sample of DHH children receiving early intervention, we
              identify variables that predict delays in vocabulary and
              early support services, which reflected both
              dimensions that are immutable and those that clinicians and
              caretakers can potentially alter. We provide a discussion of
              the implications for clinical practice. Supplemental
              material: https://doi.org/10.23641/asha.19449839.},
   Doi = {10.1044/2022_jslhr-21-00245},
   Key = {fds362823}
}

@article{fds361385,
   Author = {Bulgarelli, F and Bergelson, E},
   Title = {Talker variability shapes early word representations in
             English-learning 8-month-olds.},
   Journal = {Infancy: the official journal of the International Society
              on Infant Studies},
   Volume = {27},
   Number = {2},
   Pages = {341-368},
   Year = {2022},
   Month = {March},
   url = {http://dx.doi.org/10.1111/infa.12452},
   Abstract = {Infants must form appropriately specific representations of
             how words sound and what they mean. Previous research
             suggests that while 8-month-olds are learning words, they
             struggle with recognizing different-sounding instances of
             words (e.g., from new talkers) and with rejecting incorrect
             pronunciations. We asked how adding talker variability
             during learning may change infants' ability to learn and
             recognize words. Monolingual English-learning 7- to
             9-month-olds heard a single novel word paired with an object
             in either a "no variability," "within-talker variability,"
             or "between-talker variability" habituation. We then tested
             whether infants formed appropriately specific
             representations by changing the talker (Experiment 1a) or
             mispronouncing the word (Experiment 2) and by changing the
             trained word or object altogether (both experiments). Talker
             variability influenced learning. Infants trained with
             no-talker variability learned the word-object link, but
             failed to recognize the word trained by a new talker, and
             were insensitive to the mispronunciation. Infants trained
             with talker variability dishabituated only to the new
             object, exhibiting difficulty forming the word-object link.
             Neither pattern is adult-like. Results are reported for both
             in-lab and Zoom participants. Implications for the role of
             talker variability in early word learning are
             discussed.},
   Doi = {10.1111/infa.12452},
   Key = {fds361385}
}

@article{fds376603,
   Author = {Casey, K and Elliott, M and Mickiewicz, E and Mandujano, AS and Shorter,
             K and Duquette, M and Bergelson, E and Casillas, M},
   Title = {Sticks, leaves, buckets, and bowls: Distributional patterns
             of children's at-home object handling in two subsistence
             societies},
   Journal = {Proceedings of the 44th Annual Meeting of the Cognitive
             Science Society: Cognitive Diversity, CogSci
             2022},
   Pages = {927-933},
   Year = {2022},
   Month = {January},
   Abstract = {Object-centric interactions provide rich learning moments
             for young children, including opportunities to discover word
             meanings. Children's first-person object handling
              experiences, in particular, form a key source of input, one
             that varies across cultures and across development. Using
             daylong photo streams from child-worn cameras, we analyze
             >17k images to identify the frequency and targets of child
             object handling across the first four years in two
             small-scale subsistence farming communities on opposite
             sides of the globe (Rossel Papuan and Tseltal Mayan).
             Overall, we see general consistency in the distribution of
             object categories (e.g., consumables, mealtime tools,
             natural objects, etc.) handled by children across cultures
             and age, likely reflecting stable properties of children's
             physical environments and day-to-day routines. However, the
             exact objects available to children vary both within and
             across communities and diversify with age. These various
             distributions of handling patterns are discussed in their
             relation to potential consequences for early
             learning.},
   Key = {fds376603}
}

@article{fds361950,
   Author = {Meylan, SC and Bergelson, E},
   Title = {Learning Through Processing: Toward an Integrated Approach
             to Early Word Learning.},
   Journal = {Annual review of linguistics},
   Volume = {8},
   Pages = {77-99},
   Year = {2022},
   Month = {January},
   url = {http://dx.doi.org/10.1146/annurev-linguistics-031220-011146},
   Abstract = {Children's linguistic knowledge and the learning mechanisms
             by which they acquire it grow substantially in infancy and
             toddlerhood, yet theories of word learning largely fail to
             incorporate these shifts. Moreover, researchers'
             often-siloed focus on either familiar word recognition or
             novel word learning limits the critical consideration of how
             these two relate. As a step toward a mechanistic theory of
             language acquisition, we present a framework of "learning
             through processing" and relate it to the prevailing methods
             used to assess children's early knowledge of words.
             Incorporating recent empirical work, we posit a specific,
             testable timeline of qualitative changes in the learning
             process in this interval. We conclude with several
             challenges and avenues for building a comprehensive theory
             of early word learning: better characterization of the
             input, reconciling results across approaches, and treating
             lexical knowledge in the nascent grammar with sufficient
             sophistication to ensure generalizability across languages
             and development.},
   Doi = {10.1146/annurev-linguistics-031220-011146},
   Key = {fds361950}
}

@article{fds363192,
   Author = {Casey, K and Elliott, M and Mickiewicz, E and Mandujano, AS and Shorter,
             K and Duquette, M and Bergelson, E and Casillas, M},
   Title = {Sticks, leaves, buckets, and bowls: Distributional patterns
             of children’s at-home object handling in two subsistence
             societies},
   Pages = {927-933},
   Booktitle = {PsyArXiv},
   Year = {2022},
   url = {http://dx.doi.org/10.31234/osf.io/yfnj4},
   Abstract = {Object-centric interactions provide rich learning moments
             for young children, including opportunities to discover word
             meanings. Children's first-person object handling
              experiences, in particular, form a key source of input, one
             that varies across cultures and across development. Using
             daylong photo streams from child-worn cameras, we analyze
             >17k images to identify the frequency and targets of child
             object handling across the first four years in two
             small-scale subsistence farming communities on opposite
             sides of the globe (Rossel Papuan and Tseltal Mayan).
             Overall, we see general consistency in the distribution of
             object categories (e.g., consumables, mealtime tools,
             natural objects, etc.) handled by children across cultures
             and age, likely reflecting stable properties of children's
             physical environments and day-to-day routines. However, the
             exact objects available to children vary both within and
             across communities and diversify with age. These various
             distributions of handling patterns are discussed in their
             relation to potential consequences for early
             learning.},
   Doi = {10.31234/osf.io/yfnj4},
   Key = {fds363192}
}

@article{fds367227,
   Author = {Dailey, S and Bergelson, E},
   Title = {Talking to talkers: Infants’ talk status, but not their
             gender, is related to language input},
   Volume = {94},
   Number = {2},
   Pages = {478-496},
   Booktitle = {PsyArXiv},
   Publisher = {WILEY},
   Year = {2022},
   url = {http://dx.doi.org/10.31234/osf.io/ayx4b},
   Abstract = {Prior research points to gender differences in some early
             language skills, but is inconclusive about the mechanisms at
             play, providing evidence that both infants' early input and
             productions may differ by gender. This study examined the
             linguistic input and early productions of 44 American
             English-learning infants (93% White) in a longitudinal
             sample of home recordings collected at 6-17 months (in
             2014-2016). Girls produced more unique words than boys
             (Cohen's d = .67) and this effect grew with age, but there
             were no significant gender differences in language input
             (d = .22-.24). Instead, caregivers talked more to infants
             who had begun to talk (d = .93-.97), regardless of gender.
             Therefore, prior results highlighting gender-based input
             differences may have been due, at least partly, to this
             talking-to-talkers effect.},
   Doi = {10.31234/osf.io/ayx4b},
   Key = {fds367227}
}

@article{fds366000,
   Author = {Moore, C and Bergelson, E},
   Title = {Wordform variability in infants' language environment and
             its effects on early word learning},
   Year = {2021},
   Month = {September},
   url = {http://dx.doi.org/10.31219/osf.io/n3phk},
   Abstract = {Traditional views of language development suggest that
              noun learning involves creating a one-to-one mapping between
              concrete objects and their labels. In the current work, we
              provide evidence that real world language input to infants
              does not provide such tidy mappings. Instead, infants
              encounter many variant wordforms for familiar nouns (e.g.
              dog∼doggy∼dogs). We explore this wordform variability in
              44 English-learning infants’ naturalistic environments
              using a longitudinal corpus of infant-available speech. We
              looked at both the frequency and composition of wordform
              variability. We found two broad categories of variability:
              morpheme-adding changes, where words were pluralized or
              compounded (e.g. coat∼raincoats); and wordplay, where
              words changed form without any associated change in meaning
              (e.g. bird∼birdie). Wordplay occurred with a limited number
              of lemmas that were usually early-learned, highly frequent,
              and shorter. When looking at all wordform variability, we
              found that individual words with higher levels of wordform
              variability were learned earlier than words with fewer
              wordforms, over and above the effect of frequency.},
   Doi = {10.31219/osf.io/n3phk},
   Key = {fds366000}
}

@article{fds366001,
   Author = {Moore, C and Bergelson, E},
   Title = {Examining the roles of regularity and lexical class in
             18--26-month-olds' representations of how words
             sound},
   Year = {2021},
   Month = {September},
   url = {http://dx.doi.org/10.31219/osf.io/kp7tv},
   Abstract = {By around 12 months, infants have well-specified phonetic
              representations for the nouns they understand, for instance
              looking less at a car upon hearing ‘cur’ than ‘car’
              (Swingley & Aslin, 2002). Here we test whether such
              high-fidelity representations extend to irregular nouns, and
              regular and irregular verbs. A corpus analysis confirms the
              intuition that irregular verbs are far more common than
              irregular nouns in speech to young children. Two eyetracking
              experiments then test whether toddlers are sensitive to
              mispronunciation in regular and irregular nouns (Experiment
              1) and verbs (Experiment 2). For nouns, we find both a
              mispronunciation and regularity effect in 18-month-olds. For
              verbs, in Experiment 2a, we find only a regularity effect
              and no mispronunciation effect in 18-month-olds, though
              toddlers’ poor comprehension overall limits
              interpretation. Finally, in Experiment 2b we find a
              mispronunciation effect and no regularity effect in
              26-month-olds. Implications for wordform representations,
              lexical class, and learning are discussed.},
   Doi = {10.31219/osf.io/kp7tv},
   Key = {fds366001}
}

@article{fds355123,
   Author = {Cychosz, M and Cristia, A and Bergelson, E and Casillas, M and Baudet,
             G and Warlaumont, AS and Scaff, C and Yankowitz, L and Seidl,
             A},
   Title = {Vocal development in a large-scale crosslinguistic
             corpus.},
   Journal = {Developmental science},
   Volume = {24},
   Number = {5},
   Pages = {e13090},
   Year = {2021},
   Month = {September},
   url = {http://dx.doi.org/10.1111/desc.13090},
   Abstract = {This study evaluates whether early vocalizations develop in
             similar ways in children across diverse cultural contexts.
             We analyze data from daylong audio recordings of 49 children
             (1-36 months) from five different language/cultural
             backgrounds. Citizen scientists annotated these recordings
             to determine if child vocalizations contained canonical
             transitions or not (e.g., "ba" vs. "ee"). Results revealed
             that the proportion of clips reported to contain canonical
             transitions increased with age. Furthermore, this proportion
             exceeded 0.15 by around 7 months, replicating and extending
             previous findings on canonical vocalization development but
             using data from the natural environments of a culturally and
             linguistically diverse sample. This work explores how
             crowdsourcing can be used to annotate corpora, helping
             establish developmental milestones relevant to multiple
             languages and cultures. Lower inter-annotator reliability on
             the crowdsourcing platform, relative to more traditional
             in-lab expert annotators, means that a larger number of
             unique annotators and/or annotations are required, and that
             crowdsourcing may not be a suitable method for more
             fine-grained annotation decisions. Audio clips used for this
             project are compiled into a large-scale infant vocalization
             corpus that is available for other researchers to use in
             future work.},
   Doi = {10.1111/desc.13090},
   Key = {fds355123}
}

@article{fds357623,
   Author = {Soderstrom, M and Casillas, M and Bergelson, E and Rosemberg, C and Alam, F and Warlaumont, AS and Bunce, J},
   Title = {Developing a Cross-Cultural Annotation System and MetaCorpus
             for Studying Infants’ Real World Language
             Experience},
   Journal = {Collabra: Psychology},
   Volume = {7},
   Number = {1},
   Publisher = {University of California Press},
   Year = {2021},
   Month = {May},
   url = {http://dx.doi.org/10.1525/collabra.23445},
   Abstract = {Recent issues around reproducibility, best practices, and
             cultural bias impact naturalistic observational approaches
             as much as experimental approaches, but there has been less
             focus on this area. Here, we present a new approach that
             leverages cross-laboratory collaborative, interdisciplinary
             efforts to examine important psychological questions. We
             illustrate this approach with a particular project that
             examines similarities and differences in children’s early
             experiences with language. This project develops a
             comprehensive start-to-finish analysis pipeline by
             developing a flexible and systematic annotation system, and
             implementing this system across a sampling from a
              “metacorpus” of audio recordings of diverse language
             communities. This resource is publicly available for use,
             sensitive to cultural differences, and flexible to address a
             variety of research questions. It is also uniquely suited
             for use in the development of tools for automated
             analysis.},
   Doi = {10.1525/collabra.23445},
   Key = {fds357623}
}

@article{fds351271,
   Author = {Cristia, A and Lavechin, M and Scaff, C and Soderstrom, M and Rowland,
             C and Räsänen, O and Bunce, J and Bergelson, E},
   Title = {A thorough evaluation of the Language Environment Analysis
             (LENA) system.},
   Journal = {Behavior research methods},
   Volume = {53},
   Number = {2},
   Pages = {467-486},
   Year = {2021},
   Month = {April},
   url = {http://dx.doi.org/10.3758/s13428-020-01393-5},
   Abstract = {In the previous decade, dozens of studies involving
             thousands of children across several research disciplines
             have made use of a combined daylong audio-recorder and
              automated algorithmic analysis called the LENA®
             system, which aims to assess children's language
             environment. While the system's prevalence in the language
             acquisition domain is steadily growing, there are only
             scattered validation efforts on only some of its key
              characteristics. Here, we assess the LENA®
             system's accuracy across all of its key measures: speaker
             classification, Child Vocalization Counts (CVC),
             Conversational Turn Counts (CTC), and Adult Word Counts
             (AWC). Our assessment is based on manual annotation of clips
             that have been randomly or periodically sampled out of
             daylong recordings, collected from (a) populations similar
             to the system's original training data (North American
             English-learning children aged 3-36 months), (b) children
             learning another dialect of English (UK), and (c) slightly
             older children growing up in a different linguistic and
             socio-cultural setting (Tsimane' learners in rural Bolivia).
             We find reasonably high accuracy in some measures (AWC,
             CVC), with more problematic levels of performance in others
             (CTC, precision of male adults and other children).
             Statistical analyses do not support the view that
             performance is worse for children who are dissimilar from
              the LENA® original training set. Whether
              LENA® results are accurate enough for a given
             research, educational, or clinical application depends
             largely on the specifics at hand. We therefore conclude with
             a set of recommendations to help researchers make this
             determination for their goals.},
   Doi = {10.3758/s13428-020-01393-5},
   Key = {fds351271}
}
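
The accuracy assessment described above reduces to pairing LENA's automated counts with human annotation of the same sampled clips and summarizing their agreement. A minimal sketch of that comparison for Adult Word Counts, with invented paired counts (not data from the paper), using NumPy and SciPy:

# Compare automated vs. manual Adult Word Counts on the same clips.
# The paired counts below are invented for illustration.
import numpy as np
from scipy.stats import pearsonr

lena = np.array([120, 80, 200, 45, 150, 95, 60, 170])    # LENA AWC estimates
human = np.array([110, 90, 185, 50, 160, 100, 55, 180])  # gold annotation

r, p = pearsonr(lena, human)
rel_err = np.mean(np.abs(lena - human) / human)
print(f"r = {r:.2f} (p = {p:.3f}), mean relative error = {rel_err:.1%}")

The same pairing applies to CVC and CTC, while classification measures such as speaker-tag precision and recall come from frame- or segment-level confusion counts rather than paired totals.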

@article{fds366002,
   Author = {Dailey, S and Bergelson, E},
   Title = {Language input to infants of different socioeconomic
             statuses: A quantitative meta-analysis},
   Year = {2021},
   Month = {February},
   url = {http://dx.doi.org/10.31219/osf.io/jvdme},
   Abstract = {This Registered Report has been accepted at Developmental
              Science.},
   Doi = {10.31219/osf.io/jvdme},
   Key = {fds366002}
}

@article{fds355371,
   Author = {Meylan, SC and Foushee, R and Bergelson, E and Levy,
             RP},
   Title = {Child-directed Listening: How Caregiver Inference Enables
             Children's Early Verbal Communication},
   Volume = {abs/2102.03462},
   Year = {2021},
   Month = {February},
   Abstract = {How do adults understand children's speech? Children's
             productions over the course of language development often
             bear little resemblance to typical adult pronunciations, yet
             caregivers nonetheless reliably recover meaning from them.
             Here, we employ a suite of Bayesian models of spoken word
             recognition to understand how adults overcome the noisiness
             of child language, showing that communicative success
             between children and adults relies heavily on adult
             inferential processes. By evaluating competing models on
             phonetically-annotated corpora, we show that adults'
             recovered meanings are best predicted by prior expectations
             fitted specifically to the child language environment,
             rather than to typical adult-adult language. After
             quantifying the contribution of this "child-directed
             listening" over developmental time, we discuss the
             consequences for theories of language acquisition, as well
             as the implications for commonly-used methods for assessing
             children's linguistic proficiency.},
   Key = {fds355371}
}

@article{fds367415,
   Author = {Moore, C and Bergelson, E},
   Title = {Listeners can use coarticulation cues to predict an upcoming
             novel word},
   Journal = {Proceedings of the 43rd Annual Meeting of the Cognitive
             Science Society: Comparative Cognition: Animal Minds, CogSci
             2021},
   Pages = {2890-2896},
   Year = {2021},
   Month = {January},
   Abstract = {During lexical access, listeners turn unfolding phonetic
             input into words. We tested how participants interpret words
             that aren’t in their lexicon, either due to their
             coarticulation cues or because they label a novel object. In
             a 2-picture Visual World study, 57 adults saw a familiar
             object and an unfamiliar object, while hearing sentences
             directing their gaze to the target in 3 conditions: with a
             familiar word (“crib”), a novel word (“crig”), or a
             familiar word with coarticulation cueing a novel word
             (“cri(g)b”). When coarticulation cues matched the novel
             word (“cri(g)b”), participants looked more at the
             unfamiliar object than when the cues matched the familiar
             word, suggesting lexical competition can include a novel
             word under appropriate circumstances. When hearing a novel
             word (e.g. “crig”), participants showed two patterns:
             Roughly half looked more at the unfamiliar object, as
             expected, while the rest surprisingly looked more at the
             familiar object. We discuss the interaction of mutual
             exclusivity, phonetic similarity, and coarticulation cues in
             driving lexical access.},
   Key = {fds367415}
}

@article{fds367416,
   Author = {Meylan, SC and Foushee, R and Bergelson, E and Levy,
             RP},
   Title = {Child-directed Listening: How Caregiver Inference Enables
             Children’s Early Verbal Communication},
   Journal = {Proceedings of the 43rd Annual Meeting of the Cognitive
             Science Society: Comparative Cognition: Animal Minds, CogSci
             2021},
   Pages = {854-860},
   Year = {2021},
   Month = {January},
   Abstract = {How do adults understand children’s speech? Children’s
             productions over the course of language development often
             bear little resemblance to typical adult pronunciations, yet
             caregivers nonetheless reliably recover meaning from them.
             Here, we employ a suite of Bayesian models of spoken word
             recognition to understand how adults overcome the noisiness
             of child language, showing that communicative success
             between children and adults relies heavily on adult
             inferential processes. By evaluating competing models on
             phonetically-annotated child language from the Providence
             corpus, we show that adults’ recovered meanings are best
             predicted by prior expectations fitted specifically to the
             child language environment, rather than to typical
             adult-adult language. After quantifying the contribution of
             this “child-directed listening” over developmental time,
             we discuss the consequences for theories of language
             acquisition, as well as the implications for commonly-used
             methods for assessing children’s linguistic
             proficiency.},
   Key = {fds367416}
}

@article{fds361767,
   Author = {Bulgarelli, F and Mielke, J and Bergelson, E},
   Title = {Quantifying Talker Variability in North-American Infants'
             Daily Input.},
   Journal = {Cognitive science},
   Volume = {46},
   Number = {1},
   Pages = {e13075},
   Year = {2021},
   Month = {January},
   url = {http://dx.doi.org/10.1111/cogs.13075},
   Abstract = {Words sound slightly different each time they are said, both
             by the same talker and across talkers. Rather than hurting
             learning, lab studies suggest that talker variability helps
             infants learn similar sounding words. However, very little
             is known about how much variability infants hear within a
             single talker or across talkers in naturalistic input. Here,
             we quantified these types of talker variability for highly
             frequent words spoken to 44 infants, from naturalistic
             recordings sampled longitudinally over a year of life (from
             6 to 17 months). We used non-contrastive acoustic
             measurements (e.g., mean pitch, duration,
             harmonics-to-noise ratio) and holistic measures of sound
             similarity (normalized acoustic distance) to quantify
             acoustic variability. We find three key results. First,
             pitch-based variability was generally lower for infants' top
             talkers than across their other talkers, but overall
             acoustic distance is higher for tokens from the top talker
             versus the others. Second, the amount of acoustic
             variability infants heard could not be predicted from, and
             thus was not redundant with, other properties of the input
             such as the number of talkers or tokens, or proportion of
             speech from particular sources (e.g., women, children,
             electronics). Finally, we find that patterns of pitch-based
             acoustic variability heard in naturalistic input were
             similar to those found with in-lab stimuli that facilitated
             word learning. This large-scale quantification of talker
             variability in infants' everyday input sets the stage for
             linking naturally occurring variability "in the wild" to
             early word learning.},
   Doi = {10.1111/cogs.13075},
   Key = {fds361767}
}
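
The variability measures described above can be summarized within each talker and pooled across talkers. A minimal sketch, assuming invented mean-pitch values for tokens of a single word (the paper's measures also include duration, harmonics-to-noise ratio, and normalized acoustic distance):

# Within- vs. across-talker variability for tokens of one word.
# Pitch values (Hz) are invented for illustration.
import numpy as np

tokens_by_talker = {
    "mother":  [210, 225, 198, 240, 215],
    "father":  [120, 135, 118],
    "sibling": [260, 300, 275],
}

for talker, pitches in tokens_by_talker.items():
    p = np.asarray(pitches, dtype=float)
    cv = p.std(ddof=1) / p.mean()  # coefficient of variation within talker
    print(f"{talker}: mean = {p.mean():.0f} Hz, within-talker CV = {cv:.2f}")

pooled = np.concatenate([np.asarray(v, float)
                         for v in tokens_by_talker.values()])
print(f"across talkers: CV = {pooled.std(ddof=1) / pooled.mean():.2f}")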

@article{fds356132,
   Author = {Meylan, S and Bergelson, E},
   Title = {Learning through processing: Towards an integrated approach
             to early word learning},
   Year = {2021},
   url = {http://dx.doi.org/10.31234/osf.io/4zxfp},
   Abstract = {Children's linguistic knowledge and the learning
             mechanisms by which they acquire it grow substantially in
             infancy and toddlerhood. And yet, theories of word learning
             largely fail to incorporate these shifts. Moreover,
             researchers' often-siloed focus on either familiar word
             recognition or novel word learning limits the critical
             consideration of how these two relate. As a step towards a
             mechanistic theory of language acquisition, we first present
             a framework of "learning through processing," and relate it
             to the prevailing methods used to assess children's early
             knowledge of words. Incorporating recent empirical work, we
             posit a specific, testable timeline of qualitative changes
             in the learning process in this interval. Finally, we
             conclude with several challenges and avenues for building a
             comprehensive theory of early word learning: better
             characterization of the input, reconciling results across
             approaches, and treating lexical knowledge in the nascent
             grammar with sufficient sophistication to ensure
              generalizability across languages and development.},
   Doi = {10.31234/osf.io/4zxfp},
   Key = {fds356132}
}

@article{fds360577,
   Author = {Bulgarelli, F and Bergelson, E},
   Title = {Talker variability shapes early word representations in
             English-learning 8-month-olds},
   Year = {2021},
   url = {http://dx.doi.org/10.31234/osf.io/rxyjc},
   Abstract = {Infants must form appropriately specific representations
             of how words sound, and what they mean. Previous research
             suggests that while 8-month-olds are learning words, they
             struggle with recognizing different-sounding instances of
             words (e.g. from new talkers), and with rejecting incorrect
             pronunciations. We asked how adding talker variability
             during learning may change infants' ability to learn and
             recognize words. Monolingual English-learning
             7-to-9-month-olds heard a single novel word paired with an
              object in either a 'no variability', 'within-talker
              variability', or 'between-talker variability' habituation. We
             then tested whether infants formed appropriately specific
             representations by changing the talker (Experiment 1a) or
             mispronouncing the word (Experiment 2), and by changing the
             trained word or object altogether (both experiments). Talker
             variability influenced learning. Infants trained with no
             talker variability learned the word-object link, but failed
             to recognize the word trained by a new talker, and were
             insensitive to the mispronunciation. Infants trained with
             talker variability dishabituated only to the new object,
             exhibiting difficulty forming the word-object link. Neither
             pattern is adult-like. Results are reported for both in-lab
             and Zoom participants. Implications for the role of talker
              variability in early word learning are discussed.},
   Doi = {10.31234/osf.io/rxyjc},
   Key = {fds360577}
}

@article{fds360578,
   Author = {Bulgarelli, F and Mielke, J and Bergelson, E},
   Title = {Quantifying talker variability in North-American infants'
              daily input},
   Year = {2021},
   url = {http://dx.doi.org/10.31234/osf.io/2xj36},
   Abstract = {Words sound slightly different each time they are said,
             both by the same talker and across talkers. Rather than
             hurting learning, lab studies suggest that talker
             variability helps infants learn similar sounding words.
             However, very little is known about how much variability
             infants hear within a single talker or across talkers in
             naturalistic input. Here, we quantified these types of
             talker variability for highly frequent words spoken to 44
             infants, from naturalistic recordings sampled longitudinally
             over a year of life (from 6-17 months). We used
             non-contrastive acoustic measurements (e.g. mean pitch,
             duration, harmonics-to-noise ratio) and holistic measures of
             sound similarity (normalized acoustic distance) to quantify
             acoustic variability. We find three key results. First,
             pitch-based variability was generally lower for infants' top
             talker than across their other talkers, but overall acoustic
             distance was higher for tokens from the top talker versus the
             others. Second, the amount of acoustic variability infants
             heard could not be predicted from, and thus was not
             redundant with, other properties of the input such as number
             of talkers or tokens, or proportion of speech from
             particular sources (e.g. women, children, electronics).
             Finally, we find that patterns of pitch-based acoustic
             variability heard in naturalistic input were similar to
             those found with in-lab stimuli that facilitated word
             learning. This large-scale quantification of talker
             variability in infants' everyday input sets the stage for
             linking naturally-occurring variability ‘in the wild’ to
             early word learning.},
   Doi = {10.31234/osf.io/2xj36},
   Key = {fds360578}
}
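
%% The abstract above describes concrete, reproducible measurements
%% (per-token mean pitch and duration, within- vs. across-talker spread).
%% A minimal Python sketch of that style of analysis follows; file names
%% and talker labels are invented, and librosa's pyin tracker merely
%% stands in for whatever pitch extraction the authors used.

# Hypothetical sketch: per-token acoustic measures, then within-talker
# vs. pooled variability for one target word. Paths/labels are invented.
import numpy as np
import librosa

def token_features(path):
    """Return (mean F0 in Hz, duration in s) for one word-token clip."""
    y, sr = librosa.load(path, sr=None)
    f0, _, _ = librosa.pyin(y, fmin=75, fmax=500, sr=sr)  # NaN when unvoiced
    return np.nanmean(f0), len(y) / sr

tokens = [("mom", "ball_mom_1.wav"), ("mom", "ball_mom_2.wav"),
          ("grandpa", "ball_grandpa_1.wav")]

pitch_by_talker = {}
for talker, path in tokens:
    pitch_by_talker.setdefault(talker, []).append(token_features(path)[0])

for talker, pitches in pitch_by_talker.items():
    print(talker, "within-talker pitch SD:", np.std(pitches))
pooled = [p for v in pitch_by_talker.values() for p in v]
print("across-talker (pooled) pitch SD:", np.std(pooled))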

@article{fds363193,
   Author = {Liu, J and Hilton, C and Bergelson, E and Mehr, S},
   Title = {Language experience predicts music processing in ½ million
             speakers of 54 languages},
   Year = {2021},
   url = {http://dx.doi.org/10.1101/2021.10.18.464888},
   Abstract = {Tonal languages differ from other languages in their use of
             pitch (tones) to distinguish words. Lifelong experience
             speaking and hearing tonal languages has been argued to
             shape auditory processing in ways that generalize beyond the
             perception of linguistic pitch to the perception of pitch in
             other domains like music. To examine this, we first
             conducted a meta-analysis, finding moderate evidence for
             this idea, but in studies strongly limited by mostly small
             sample sizes in only a few tonal languages and countries.
             This makes it challenging to disentangle the effects of
             linguistic experience from variability in music training
             experience, cultural differences, and other potential
             confounds. To address these issues, we used web-based
             citizen science to test this question on a global scale. We
             assessed music perception skill in n = 34,034 native
             speakers of 19 tonal languages (e.g., Mandarin, Yoruba) and
             compared their performance to n = 459,066 native speakers
             of other languages, including 6 pitch-accented (e.g.,
             Japanese) and 29 non-tonal languages (e.g., Hungarian).
             Whether or not participants had taken music lessons, native
             speakers of all 19 tonal languages had an improved ability
             to discriminate musical melodies. But this improvement came
             with a trade-off: relative to speakers of pitch-accented or
             non-tonal languages, tonal language speakers were also worse
             at processing the musical beat. These results, which held
             across tonal languages from a variety of geographic regions
             and were robust to geographic and demographic variation,
             demonstrate that linguistic experience shapes music
             perception ability, with implications for relations between
             music, language, and culture in the human
             mind.},
   Doi = {10.1101/2021.10.18.464888},
   Key = {fds363193}
}
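
%% The group comparison reported above is summarized with effect sizes;
%% a minimal sketch of computing Cohen's d with an approximate 95% CI is
%% below. Scores are randomly generated placeholders, and the paper's
%% actual models (e.g., adjusting for music lessons) are more involved.

# Sketch: Cohen's d for two groups plus a normal-approximation CI.
import numpy as np

def cohens_d(a, b):
    na, nb = len(a), len(b)
    pooled_sd = np.sqrt(((na - 1) * np.var(a, ddof=1) +
                         (nb - 1) * np.var(b, ddof=1)) / (na + nb - 2))
    return (np.mean(a) - np.mean(b)) / pooled_sd

def d_ci(d, na, nb, z=1.96):
    se = np.sqrt((na + nb) / (na * nb) + d ** 2 / (2 * (na + nb)))
    return d - z * se, d + z * se

rng = np.random.default_rng(0)
tonal = rng.normal(0.72, 0.15, 500)       # hypothetical melody accuracy
nontonal = rng.normal(0.68, 0.15, 500)
d = cohens_d(tonal, nontonal)
lo, hi = d_ci(d, len(tonal), len(nontonal))
print("d = %.2f, 95%% CI = [%.2f, %.2f]" % (d, lo, hi))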

@article{fds349992,
   Author = {Laing, C and Bergelson, E},
   Title = {From babble to words: Infants' early productions match words
             and objects in their environment.},
   Journal = {Cognitive psychology},
   Volume = {122},
   Pages = {101308},
   Year = {2020},
   Month = {November},
   url = {http://dx.doi.org/10.1016/j.cogpsych.2020.101308},
   Abstract = {Infants' early babbling allows them to engage in
             proto-conversations with caretakers, well before clearly
             articulated, meaningful words are part of their productive
             lexicon. Moreover, the well-rehearsed sounds from babble
             serve as a perceptual 'filter', drawing infants' attention
             towards words that match the sounds they can reliably
             produce. Using naturalistic home recordings of 44
             10-11-month-olds (an age with high variability in early
             speech sound production), this study tests whether infants'
             early consonant productions match words and objects in their
             environment. We find that infants' babble matches the
             consonants produced in their caregivers' speech. Infants
             with a well-established consonant repertoire also match
             their babble to objects in their environment. Our findings
             show that infants' early consonant productions are shaped by
             their input: by 10 months, the sounds of babble match what
             infants see and hear.},
   Doi = {10.1016/j.cogpsych.2020.101308},
   Key = {fds349992}
}

@article{fds366003,
   Author = {Garrison, H and Baudet, G and Breitfeld, E and Aberman, A and Bergelson,
             E},
   Title = {Familiarity Plays a Small Role in Noun Comprehension at
             12-18 months},
   Year = {2020},
   Month = {October},
   url = {http://dx.doi.org/10.31234/osf.io/b3pj6},
   Abstract = {Infants amass thousands of hours of experience with
             particular items, each of which is representative of a
             broader category that often shares perceptual features.
             Robust word comprehension requires generalizing known labels
             to new category members. While young infants have been found
             to look at common nouns when they are named aloud, the role
             of item familiarity has not been well-examined. This study
             compares 12-18-month-olds’ word comprehension in the
             context of pairs of their own items (e.g. photos of their
             own shoe and ball) versus new tokens from the same category
             (e.g. a new shoe and ball). Our results replicate previous
             work showing that noun comprehension improves rapidly over
             the second year, while also suggesting that item familiarity
             appears to play a far smaller role in comprehension in this
             age-range. This in turn suggests that even before age two,
             ready generalization beyond particular experiences is an
             intrinsic component of lexical development.},
   Doi = {10.31234/osf.io/b3pj6},
   Key = {fds366003}
}

@article{fds350210,
   Author = {Bergelson, E},
   Title = {The Comprehension Boost in Early Word Learning: Older
             Infants Are Better Learners.},
   Journal = {Child development perspectives},
   Volume = {14},
   Number = {3},
   Pages = {142-149},
   Year = {2020},
   Month = {September},
   url = {http://dx.doi.org/10.1111/cdep.12373},
   Abstract = {Recent research has revealed that infants begin
             understanding words at around 6 months. After that, infants'
             comprehension vocabulary increases gradually in a linear way
             over 8-18 months, according to data from parental
             checklists. In contrast, infants' word comprehension
             improves robustly, qualitatively, and in a nonlinear way
             just after their first birthday, according to data from
             studies on spoken word comprehension. In this review, I
             integrate observational and experimental data to explain
             these divergent results. I argue that infants' comprehension
             boost is not well-explained by changes in their language
             input for common words, but rather by proposing that they
             learn to take better advantage of relatively stable input
             data. Next, I propose potentially complementary theoretical
             accounts of what makes older infants better learners.
             Finally, I suggest how the research community can expand our
             empirical base in this understudied area, and why doing so
             will inform our knowledge about child development.},
   Doi = {10.1111/cdep.12373},
   Key = {fds350210}
}

@article{fds350533,
   Author = {Sheskin, M and Scott, K and Mills, CM and Bergelson, E and Bonawitz, E and Spelke, ES and Fei-Fei, L and Keil, FC and Gweon, H and Tenenbaum, JB and Jara-Ettinger, J and Adolph, KE and Rhodes, M and Frank, MC and Mehr,
             SA and Schulz, L},
   Title = {Online Developmental Science to Foster Innovation, Access,
             and Impact.},
   Journal = {Trends in cognitive sciences},
   Volume = {24},
   Number = {9},
   Pages = {675-678},
   Year = {2020},
   Month = {September},
   url = {http://dx.doi.org/10.1016/j.tics.2020.06.004},
   Abstract = {We propose that developmental cognitive science should
             invest in an online CRADLE, a Collaboration for Reproducible
             and Distributed Large-Scale Experiments that crowdsources
             data from families participating on the internet. Here, we
             discuss how the field can work together to further expand
             and unify current prototypes for the benefit of researchers,
             science, and society.},
   Doi = {10.1016/j.tics.2020.06.004},
   Key = {fds350533}
}

@article{fds349386,
   Author = {Garrison, H and Baudet, G and Breitfeld, E and Aberman, A and Bergelson,
             E},
   Title = {Familiarity plays a small role in noun comprehension at
             12-18 months.},
   Journal = {Infancy : the official journal of the International Society
             on Infant Studies},
   Volume = {25},
   Number = {4},
   Pages = {458-477},
   Year = {2020},
   Month = {July},
   url = {http://dx.doi.org/10.1111/infa.12333},
   Abstract = {Infants amass thousands of hours of experience with
             particular items, each of which is representative of a
             broader category that often shares perceptual features.
             Robust word comprehension requires generalizing known labels
             to new category members. While young infants have been found
             to look at common nouns when they are named aloud, the role
             of item familiarity has not been well examined. This study
             compares 12- to 18-month-olds' word comprehension in the
             context of pairs of their own items (e.g., photographs of
             their own shoe and ball) versus new tokens from the same
             category (e.g., a new shoe and ball). Our results replicate
             previous work showing that noun comprehension improves
             rapidly over the second year, while also suggesting that
             item familiarity appears to play a far smaller role in
             comprehension in this age range. This in turn suggests that
             even before age 2, ready generalization beyond particular
             experiences is an intrinsic component of lexical
             development.},
   Doi = {10.1111/infa.12333},
   Key = {fds349386}
}

@article{fds366004,
   Author = {Laing, C and Bergelson, E},
   Title = {From babble to words: Infants’ early productions match
             words and objects in their environment},
   Year = {2020},
   Month = {June},
   url = {http://dx.doi.org/10.31234/osf.io/wp3n4},
   Abstract = {Infants’ early babbling allows them to engage in
             proto-conversations with caretakers, well before clearly
             articulated, meaningful words are part of their productive
             lexicon. Moreover, the well-rehearsed sounds from babble
             serve as a perceptual ‘filter’, drawing infants’
             attention towards words that match the sounds they can
             reliably produce. Using naturalistic home recordings of 44
             10-11-month-olds (an age with high variability in early
             speech sound production), this study tests whether
             infants’ early consonant productions match words and
             objects in their environment. We find that infants’ babble
             matches the consonants produced in their caregivers’
             speech. Infants with a well-established consonant repertoire
             also match their babble to objects in their environment. Our
             findings show that infants’ early consonant productions
             are shaped by their input: by 10 months, the sounds of
             babble match what infants see and hear.},
   Doi = {10.31234/osf.io/wp3n4},
   Key = {fds366004}
}

@article{fds366005,
   Author = {Garrison, H and Baudet, G and Breitfeld, E and Aberman, A and Bergelson,
             E},
   Title = {Familiarity Plays a Small Role in Noun Comprehension at
             12-18 months},
   Year = {2020},
   Month = {April},
   url = {http://dx.doi.org/10.31234/osf.io/sz38y},
   Abstract = {Infants amass thousands of hours of experience with
             particular items, each of which is representative of a
             broader category that often shares perceptual features.
             Robust word comprehension requires generalizing known labels
             to new category members. While young infants have been found
             to look at common nouns when they are named aloud, the role
             of item familiarity has not been well-examined. This study
             compares 12-18-month-olds’ word comprehension in the
             context of pairs of their own items (e.g. photos of their
             own shoe and ball) versus new tokens from the same category
             (e.g. a new shoe and ball). Our results replicate previous
             work showing that noun comprehension improves rapidly over
             the second year, while also suggesting that item familiarity
             appears to play a far smaller role in comprehension in this
             age-range. This in turn suggests that even before age two,
             ready generalization beyond particular experiences is an
             intrinsic component of lexical development.},
   Doi = {10.31234/osf.io/sz38y},
   Key = {fds366005}
}

@article{fds345448,
   Author = {Bulgarelli, F and Bergelson, E},
   Title = {Look who's talking: A comparison of automated and
             human-generated speaker tags in naturalistic day-long
             recordings.},
   Journal = {Behavior research methods},
   Volume = {52},
   Number = {2},
   Pages = {641-653},
   Year = {2020},
   Month = {April},
   url = {http://dx.doi.org/10.3758/s13428-019-01265-7},
   Abstract = {The LENA system has revolutionized research on language
             acquisition, providing both a wearable device to collect
             day-long recordings of children's environments, and a set of
             automated outputs that process, identify, and classify
             speech using proprietary algorithms. This output includes
             information about input sources (e.g., adult male,
             electronics). While this system has been tested across a
             variety of settings, here we delve deeper into validating
             the accuracy and reliability of LENA's automated
             diarization, i.e., tags of who is talking. Specifically, we
             compare LENA's output with a gold standard set of manually
             generated talker tags from a dataset of 88 day-long
             recordings, taken from 44 infants at 6 and 7 months, which
             includes 57,983 utterances. We compare accuracy across a
             range of classifications from the original LENA Technical
             Report, alongside a set of analyses examining classification
             accuracy by utterance type (e.g., declarative, singing).
             Consistent with previous validations, we find overall high
             agreement between the human and LENA-generated speaker tags
             for adult speech in particular, with poorer performance
             identifying child, overlap, noise, and electronic speech
             (accuracy range across all measures: 0-92%). We discuss
             several clear benefits of using this automated system
             alongside potential caveats based on the error patterns we
             observe, concluding with implications for research using
             LENA-generated speaker tags.},
   Doi = {10.3758/s13428-019-01265-7},
   Key = {fds345448}
}
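
%% Validating automated speaker tags against human "gold standard" tags,
%% as above, reduces to comparing two label sequences. A toy sketch with
%% invented tag sequences (using LENA-style codes) follows; the paper's
%% analyses are far more extensive.

# Sketch: confusion matrix, overall accuracy, and per-class recall for
# automated vs. human speaker tags. Sequences are invented.
from sklearn.metrics import accuracy_score, confusion_matrix, recall_score

human = ["FAN", "FAN", "CHN", "MAN", "NOF", "CHN", "FAN", "TVN"]
lena  = ["FAN", "FAN", "NOF", "MAN", "NOF", "FAN", "FAN", "TVN"]
labels = sorted(set(human) | set(lena))

print(confusion_matrix(human, lena, labels=labels))
print("overall accuracy:", accuracy_score(human, lena))
recalls = recall_score(human, lena, labels=labels, average=None,
                       zero_division=0)
for lab, r in zip(labels, recalls):
    print(lab, "recall: %.2f" % r)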

@article{fds349427,
   Author = {Cristia, A and Bulgarelli, F and Bergelson, E},
   Title = {Accuracy of the Language Environment Analysis System
             Segmentation and Metrics: A Systematic Review.},
   Journal = {Journal of speech, language, and hearing research :
             JSLHR},
   Volume = {63},
   Number = {4},
   Pages = {1093-1105},
   Year = {2020},
   Month = {April},
   url = {http://dx.doi.org/10.1044/2020_jslhr-19-00017},
   Abstract = {Purpose The Language Environment Analysis (LENA) system
             provides automated measures facilitating clinical and
             nonclinical research and interventions on language
             development, but there are only a few, scattered independent
             reports of these measures' validity. The objectives of the
             current systematic review were to (a) discover studies
             comparing LENA output with manual annotation, namely,
             accuracy of talker labels, as well as involving adult word
             counts (AWCs), conversational turn counts (CTCs), and child
             vocalization counts (CVCs); (b) describe them qualitatively;
             (c) quantitatively integrate them to assess central
             tendencies; and (d) quantitatively integrate them to assess
             potential moderators. Method Searches on Google Scholar,
             PubMed, Scopus, and PsycInfo were combined with expert
             knowledge and interarticle citations, resulting in 238
             records screened and 73 records whose full text was
             inspected. To be included, studies must target children
             under the age of 18 years and report on accuracy of LENA
             labels (e.g., precision and/or recall) and/or AWC, CTC, or
             CVC (correlations and/or error metrics). Results A total of
             33 studies, in 28 articles, were discovered. A qualitative
             review revealed most validation studies had not been peer
             reviewed as such and failed to report key methodology and
             results. Quantitative integration of the results was
             possible for a broad definition of recall and precision
             (M = 59% and 68%, respectively; N = 12-13),
             for AWC (mean r = .79, N = 13), CVC (mean
             r = .77, N = 5), and CTC (mean r = .36,
             N = 6). Publication bias and moderators could not be
             assessed meta-analytically. Conclusion Further research and
             improved reporting are needed in studies evaluating LENA
             segmentation and quantification accuracy, with work
             investigating CTC being particularly urgent. Supplemental
             Material https://osf.io/4nhms/.},
   Doi = {10.1044/2020_jslhr-19-00017},
   Key = {fds349427}
}
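
%% The review's central-tendency estimates pool correlations across
%% studies. One standard way to compute such a mean r (not necessarily
%% the authors' exact procedure) is to average Fisher-z-transformed
%% values; the per-study correlations below are invented.

# Sketch: pooled correlation via Fisher's r-to-z transform.
import numpy as np

def mean_r(rs):
    z = np.arctanh(np.asarray(rs))   # Fisher r-to-z
    return np.tanh(z.mean())         # back-transform the mean z

awc_rs = [0.85, 0.72, 0.80, 0.76]    # hypothetical per-study AWC r values
print("pooled AWC r: %.2f" % mean_r(awc_rs))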

@article{fds359985,
   Author = {Frank, MC and Alcock, KJ and Arias-Trejo, N and Aschersleben, G and Baldwin, D and Barbu, S and Bergelson, E and Bergmann, C and Black, AK and Blything, R and Böhland, MP and Bolitho, P and Borovsky, A and Brady,
             SM and Braun, B and Brown, A and Byers-Heinlein, K and Campbell, LE and Cashon, C and Choi, M and Christodoulou, J and Cirelli, LK and Conte, S and Cordes, S and Cox, C and Cristia, A and Cusack, R and Davies, C and de
             Klerk, M and Delle Luche and C and de Ruiter, L and Dinakar, D and Dixon,
             KC and Durier, V and Durrant, S and Fennell, C and Ferguson, B and Ferry,
             A and Fikkert, P and Flanagan, T and Floccia, C and Foley, M and Fritzsche,
             T and Frost, RLA and Gampe, A and Gervain, J and Gonzalez-Gomez, N and Gupta, A and Hahn, LE and Hamlin, JK and Hannon, EE and Havron, N and Hay,
             J and Hernik, M and Höhle, B and Houston, DM and Howard, LH and Ishikawa,
             M and Itakura, S and Jackson, I and Jakobsen, KV and Jarto, M and Johnson,
             SP and Junge, C and Karadag, D and Kartushina, N and Kellier, DJ and Keren-Portnoy, T and Klassen, K and Kline, M and Ko, ES and Kominsky,
             JF and Kosie, JE and Kragness, HE and Krieger, AAR and Krieger, F and Lany,
             J and Lazo, RJ and Lee, M and Leservoisier, C and Levelt, C and Lew-Williams, C and Lippold, M and Liszkowski, U and Liu, L and Luke,
             SG and Lundwall, RA and Cassia, VM and Mani, N and Marino, C and Martin, A and Mastroberardino, M and Mateu, V and Mayor, J and Menn, K and Michel, C and Moriguchi, Y and Morris, B and Nave, KM and Nazzi,
             T},
   Title = {Quantifying Sources of Variability in Infancy Research Using
             the Infant-Directed-Speech Preference},
   Journal = {Advances in Methods and Practices in Psychological
             Science},
   Volume = {3},
   Number = {1},
   Pages = {24-52},
   Publisher = {SAGE Publications},
   Year = {2020},
   Month = {March},
   url = {http://dx.doi.org/10.1177/2515245919900809},
   Abstract = {Psychological scientists have become increasingly concerned
             with issues related to methodology and replicability, and
             infancy researchers in particular face specific challenges
             related to replicability: For example, high-powered studies
             are difficult to conduct, testing conditions vary across
             labs, and different labs have access to different infant
             populations. Addressing these concerns, we report on a
             large-scale, multisite study aimed at (a) assessing the
             overall replicability of a single theoretically important
             phenomenon and (b) examining methodological, cultural, and
             developmental moderators. We focus on infants’ preference
             for infant-directed speech (IDS) over adult-directed speech
             (ADS). Stimuli of mothers speaking to their infants and to
             an adult in North American English were created using
             seminaturalistic laboratory-based audio recordings.
             Infants’ relative preference for IDS and ADS was assessed
             across 67 laboratories in North America, Europe, Australia,
             and Asia using the three common methods for measuring
             infants’ discrimination (head-turn preference, central
             fixation, and eye tracking). The overall meta-analytic
             effect size (Cohen’s d) was 0.35, 95% confidence interval
             = [0.29, 0.42], which was reliably above zero but smaller
             than the meta-analytic mean computed from previous
             literature (0.67). The IDS preference was significantly
             stronger in older children, in those children for whom the
             stimuli matched their native language and dialect, and in
             data from labs using the head-turn preference procedure.
             Together, these findings replicate the IDS preference but
             suggest that its magnitude is modulated by development,
             native-language experience, and testing procedure.},
   Doi = {10.1177/2515245919900809},
   Key = {fds359985}
}
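
%% The meta-analytic effect size above aggregates per-lab estimates. A
%% minimal fixed-effect, inverse-variance pooling sketch is below; the
%% published analysis may differ (e.g., mixed-effects models), and the
%% per-lab (d, se) pairs are invented.

# Sketch: fixed-effect inverse-variance pooling of per-lab effect sizes.
import numpy as np

labs = [(0.41, 0.12), (0.22, 0.09), (0.55, 0.20), (0.30, 0.11)]
d = np.array([est for est, _ in labs])
w = 1 / np.array([se for _, se in labs]) ** 2   # weight = 1 / variance

pooled = (w * d).sum() / w.sum()
se = np.sqrt(1 / w.sum())
print("pooled d = %.2f, 95%% CI = [%.2f, %.2f]"
      % (pooled, pooled - 1.96 * se, pooled + 1.96 * se))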

@article{fds366006,
   Author = {Cristia, A and Lavechin, M and Scaff, C and Soderstrom, M and Rowland,
             CF and Räsänen, O and Bunce, JP and Bergelson, E},
   Title = {A thorough evaluation of the Language Environment Analysis
             (LENA™) system},
   Year = {2020},
   Month = {January},
   url = {http://dx.doi.org/10.31219/osf.io/czbym},
   Abstract = {In the previous decade, dozens of studies involving
             thousands of children across several research disciplines
             have made use of a combined daylong audio-recorder and
             automated algorithmic analysis called the LENA® system,
             which aims to assess children's language environment. While
             the system's prevalence in the language acquisition domain
             is steadily growing, there are only scattered validation
             efforts, on only some of its key characteristics. Here, we
             assess the LENA® system's accuracy across all of its key
             measures: speaker classification, Child Vocalization Counts
             (CVC), Conversational Turn Counts (CTC), and Adult Word
             Counts (AWC). Our assessment is based on manual annotation
             of clips that have been randomly or periodically sampled out
             of daylong recordings, collected from (a) populations
             similar to the system's original training data (North
             American English-learning children aged 3-36 months), (b)
             children learning another dialect of English (UK), and (c)
             slightly older children growing up in a different linguistic
             and socio-cultural setting (Tsimane' learners in rural
             Bolivia). We find reasonably high accuracy in some measures
             (AWC, CVC), with more problematic levels of performance in
             others (CTC, precision of male adults and other children).
             Statistical analyses do not support the view that
             performance is worse for children who are dissimilar from
             the LENA® original training set. Whether LENA® results
             are accurate enough for a given research, educational, or
             clinical application depends largely on the specifics at
             hand. We therefore conclude with a set of recommendations to
             help researchers make this determination for their
             goals.},
   Doi = {10.31219/osf.io/czbym},
   Key = {fds366006}
}
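
%% The evaluation above rests on clips "randomly or periodically sampled"
%% from daylong recordings. A toy sketch of both sampling schemes is
%% below; clip length and counts are arbitrary illustrative choices.

# Sketch: random vs. periodic clip onsets (in seconds) for annotation.
import random

DAY_S = 16 * 3600      # a 16-hour daylong recording
CLIP_S = 120           # 2-minute clips
N_CLIPS = 15

def random_onsets(seed=0):
    rng = random.Random(seed)
    return sorted(rng.randrange(0, DAY_S - CLIP_S) for _ in range(N_CLIPS))

def periodic_onsets():
    step = DAY_S // N_CLIPS
    return [i * step for i in range(N_CLIPS)]

print("random:  ", random_onsets())
print("periodic:", periodic_onsets())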

@article{fds355372,
   Author = {Meylan, SC and Levy, RP and Bergelson, E},
   Title = {Children's Expressive and Receptive Knowledge of the English
             Regular Plural},
   Journal = {Proceedings for the 42nd Annual Meeting of the Cognitive
             Science Society: Developing a Mind: Learning in Humans,
             Animals, and Machines, CogSci 2020},
   Pages = {2270-2276},
   Publisher = {cognitivesciencesociety.org},
   Editor = {Denison, S and Mack, M and Xu, Y and Armstrong, BC},
   Year = {2020},
   Month = {January},
   Abstract = {We investigate the development of children's early
             grammatical knowledge using the test case of the English
             regular plural. Previous research points to early
             generalization, with children applying an abstract
             morphological rule to produce novel plurals well before 24
             months. At the same time, children use the plural
             inconsistently with familiar object words, and demonstrate
             limited receptive knowledge of the plural in the absence of
             supporting linguistic features. In the first study to test
             knowledge of the plural within participants using a paradigm
             matched across comprehension and production, we conduct two
             experiments with n = 52 24-36-month-olds: an eyetracking
             task to evaluate what they understand, and a storybook task
             to test how they use the plural. We manipulate both novelty
             (novel vs. familiar object words) and phonological form (/s/
             vs. /z/ plurals). We find strong, age-related evidence of
             productive knowledge of the plural in an expressive task,
             but do not find evidence of receptive knowledge in these
             same children.},
   Key = {fds355372}
}

@article{fds357503,
   Author = {Soderstrom, M and Casillas, M and Bergelson, E and Rosemberg, CR and Alam, F and Warlaumont, A and Bunce, J},
   Title = {Developing A Cross-Cultural Annotation System and MetaCorpus
             for Studying Infants’ Real World Language
             Experience},
   Year = {2020},
   url = {http://dx.doi.org/10.31234/osf.io/bf63y},
   Abstract = {Recent issues around reproducibility, best practices, and
             cultural bias impact naturalistic observational approaches
             as much as experimental approaches, but there has been less
             focus on this area. Here, we present a new approach that
             leverages cross-laboratory collaborative, interdisciplinary
             efforts to examine important psychological questions. We
             illustrate this approach with a particular project that
             examines similarities and differences in children’s early
             experiences with language. This project develops a
             comprehensive start-to-finish analysis pipeline by
             developing a flexible and systematic annotation system, and
             implementing this system across a sampling from a
             “metacorpus” of audiorecordings of diverse language
             communities. This resource is publicly available for use,
             sensitive to cultural differences, and flexible to address a
             variety of research questions. It is also uniquely suited
             for use in the development of tools for automated
             analysis.},
   Doi = {10.31234/osf.io/bf63y},
   Key = {fds357503}
}

@article{fds357504,
   Author = {Bunce, J and Soderstrom, M and Bergelson, E and Rosemberg, CR and Stein,
             A and Alam, F and Migdalek, M and Casillas, M},
   Title = {A cross-cultural examination of young children’s everyday
             language experiences},
   Year = {2020},
   url = {http://dx.doi.org/10.31234/osf.io/723pr},
   Abstract = {We present an exploratory cross-cultural analysis of the
             quantity of target-child-directed speech and adult-directed
             speech to young children learning North American English (US
             & Canadian), United Kingdom English, Argentinian
             Spanish, Tseltal (Tenejapa, Mayan), and Yélî Dnye (Rossel
             Island, Papuan), using annotations from 69 children aged
             2–36 months. Using a novel methodological approach, our
             cross-cultural findings support prior work suggesting that
             target-child-directed speech quantities are stable across
             early development, while adult-directed speech decreases. A
             preponderance of speech from women was found to a similar
             degree across groups, with less target-child-directed speech
             from men and children in the North American samples than
             elsewhere. Consistently across groups, children also heard
             more adult-directed than target-child-directed speech.
             Finally, the numbers of talkers present at any given moment
             strongly impacted children’s moment-to-moment input
             quantities. These findings illustrate how the structure of
             home life impacts patterns of early language exposure across
             diverse societies.},
   Doi = {10.31234/osf.io/723pr},
   Key = {fds357504}
}

@article{fds366007,
   Author = {Cristia, A and Bulgarelli, F and Bergelson, E},
   Title = {Accuracy of the Language Environment Analysis (LENA™)
             System Segmentation and Metrics: A Systematic
             Review},
   Year = {2019},
   Month = {October},
   url = {http://dx.doi.org/10.31219/osf.io/fhs57},
   Abstract = {Purpose: The Language Environment Analysis (LENA™)
             system provides automated measures facilitating clinical and
             non-clinical research and interventions on language
             development, but there are only a few, scattered independent
             reports of these measures’ validity. The objectives of the
             current systematic review were to (1) Discover studies
             comparing LENA™ output with manual annotation, namely
             accuracy of talker labels, as well as involving Adult Word
             Counts (AWC), Conversational Turn Counts (CTC), and Child
             Vocalization Counts (CVC); (2) Describe them qualitatively;
             (3) Quantitatively integrate them to assess central
             tendencies; and (4) Quantitatively integrate them to assess
             potential moderators. Method: Searches on Google Scholar,
             PubMed, Scopus, and PsycInfo were combined with expert
             knowledge and inter-article citations, resulting in 238
             records screened, and 73 records whose full-text was
             inspected. To be included, studies must target children
             under age 18 years and report on accuracy of LENA™ labels
             (e.g., precision and/or recall), and/or AWC, CTC, or CVC
             (correlations and/or error metrics). Results: A total of 33
             studies, in 28 articles, were discovered. A qualitative
             review revealed most validation studies had not been
             peer-reviewed as such and failed to report key methodology
             and results. Quantitative integration of the results was
             possible for a broad definition of recall and precision
             (mean = 59% and 68% respectively, N = 12-13), for AWC (mean
             r = .79, N = 13), CVC (mean r = .77, N = 5), and CTC (mean r
             = .36, N = 6). Publication bias and moderators could not be
             assessed meta-analytically. Conclusion: Further research and
             improved reporting are needed in studies evaluating LENA
             segmentation and quantification accuracy, with work
             investigating CTC being particularly urgent.},
   Doi = {10.31219/osf.io/fhs57},
   Key = {fds366007}
}

@article{fds345888,
   Author = {Räsänen, O and Seshadri, S and Karadayi, J and Riebling, E and Bunce,
             J and Cristia, A and Metze, F and Casillas, M and Rosemberg, C and Bergelson, E and Soderstrom, M},
   Title = {Automatic word count estimation from daylong child-centered
             recordings in various language environments using
             language-independent syllabification of speech},
   Journal = {Speech Communication},
   Volume = {113},
   Pages = {63-80},
   Year = {2019},
   Month = {October},
   url = {http://dx.doi.org/10.1016/j.specom.2019.08.005},
   Abstract = {Automatic word count estimation (WCE) from audio recordings
             can be used to quantify the amount of verbal communication
             in a recording environment. One key application of WCE is to
             measure language input heard by infants and toddlers in
             their natural environments, as captured by daylong
             recordings from microphones worn by the infants. Although
             WCE is nearly trivial for high-quality signals in
             high-resource languages, daylong recordings are
             substantially more challenging due to the unconstrained
             acoustic environments and the presence of near- and
             far-field speech. Moreover, many use cases of interest
             involve languages for which reliable ASR systems or even
             well-defined lexicons are not available. A good WCE system
             should also perform similarly for low- and high-resource
             languages in order to enable unbiased comparisons across
             different cultures and environments. Unfortunately, the
             current state-of-the-art solution, the LENA system, is based
             on proprietary software and has only been optimized for
             American English, limiting its applicability. In this paper,
             we build on existing work on WCE and present the steps we
             have taken towards a freely available system for WCE that
             can be adapted to different languages or dialects with a
             limited amount of orthographically transcribed speech data.
             Our system is based on language-independent syllabification
             of speech, followed by a language-dependent mapping from
             syllable counts (and a number of other acoustic features) to
             the corresponding word count estimates. We evaluate our
             system on samples from daylong infant recordings from six
             different corpora consisting of several languages and
             socioeconomic environments, all manually annotated with the
             same protocol to allow direct comparison. We compare a
             number of alternative techniques for the two key components
             in our system: speech activity detection and automatic
             syllabification of speech. As a result, we show that our
             system can reach relatively consistent WCE accuracy across
             multiple corpora and languages (with some limitations). In
             addition, the system outperforms LENA on three of the four
             corpora consisting of different varieties of English. We
             also demonstrate how an automatic neural network-based
             syllabifier, when trained on multiple languages, generalizes
             well to novel languages beyond the training data,
             outperforming two previously proposed unsupervised
             syllabifiers as a feature extractor for WCE.},
   Doi = {10.1016/j.specom.2019.08.005},
   Key = {fds345888}
}
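
%% The system described above is two-stage: language-independent syllable
%% detection, then a language-dependent mapping from syllable counts to
%% word counts. The sketch below substitutes a crude amplitude-envelope
%% peak-picker for the paper's trained syllabifiers; the clip name and
%% syllables-per-word factor are invented.

# Sketch: count syllable-like envelope peaks, then map to a word count.
import numpy as np
from scipy.signal import find_peaks
import librosa

def estimate_syllables(path):
    y, sr = librosa.load(path, sr=16000)
    env = np.abs(y)
    win = int(0.05 * sr)                       # ~50 ms smoothing window
    env = np.convolve(env, np.ones(win) / win, mode="same")
    peaks, _ = find_peaks(env, height=0.1 * env.max(),
                          distance=int(0.15 * sr))  # peaks >= 150 ms apart
    return len(peaks)

syllables = estimate_syllables("clip.wav")     # hypothetical clip
syll_per_word = 1.4                            # would be fit per language
print("estimated word count:", syllables / syll_per_word)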

@article{fds343464,
   Author = {Moore, C and Dailey, S and Garrison, H and Amatuni, A and Bergelson,
             E},
   Title = {Point, walk, talk: Links between three early milestones,
             from observation and parental report.},
   Journal = {Developmental psychology},
   Volume = {55},
   Number = {8},
   Pages = {1579-1593},
   Year = {2019},
   Month = {August},
   url = {http://dx.doi.org/10.1037/dev0000738},
   Abstract = {Around their first birthdays, infants begin to point, walk,
             and talk. These abilities are appreciable both by
             researchers with strictly standardized criteria and
             caregivers with more relaxed notions of what each of these
             skills entails. Here, we compare the onsets of these skills
             and links among them across two data collection methods:
             observation and parental report. We examine pointing,
             walking, and talking in a sample of 44 infants studied
             longitudinally from 6 to 18 months. In this sample, links
             between pointing and vocabulary were tighter than those
             between walking and vocabulary, supporting a unified
             sociocommunicative growth account. Indeed, across several
             cross-sectional and longitudinal analyses, pointers had
             larger vocabularies than their nonpointing peers. In
             contrast to previous work, this did not hold for walkers'
             versus crawlers' vocabularies in our sample. Comparing
             across data sources, we find that reported and observed
             estimates of the growing vocabulary and of age of walk onset
             were closely correlated, while agreement between parents and
             researchers on pointing onset and talking onset was weaker.
             Taken together, these results support a developmental
             account in which gesture and language are intertwined
             aspects of early communication and symbolic thinking,
             whereas the shift from crawling to walking appears
             indistinct from age in its relation with language. We
             conclude that pointing, walking, and talking are on similar
             timelines yet distinct from one another, and discuss
             methodological and theoretical implications in the context
             of early development.},
   Doi = {10.1037/dev0000738},
   Key = {fds343464}
}

@article{fds366580,
   Author = {Cristia, A and Lavechin, M and Scaff, C and Soderstrom, M and Rowland,
             CF and Räsänen, O and Bunce, JP and Bergelson, E},
   Title = {A thorough evaluation of the Language Environment Analysis
             (LENA™) system},
   Year = {2019},
   Month = {August},
   url = {http://dx.doi.org/10.31219/osf.io/mxr8s},
   Abstract = {In the previous decade, dozens of studies involving
             thousands of children across several research disciplines
             have made use of a combined daylong audio-recorder and
             automated algorithmic analysis called the LENA® system,
             which aims to assess children's language environment. While
             the system's prevalence in the language acquisition domain
             is steadily growing, there are only scattered validation
             efforts, on only some of its key characteristics. Here, we
             assess the LENA® system's accuracy across all of its key
             measures: speaker classification, Child Vocalization Counts
             (CVC), Conversational Turn Counts (CTC), and Adult Word
             Counts (AWC). Our assessment is based on manual annotation
             of clips that have been randomly or periodically sampled out
             of daylong recordings, collected from (a) populations
             similar to the system's original training data (North
             American English-learning children aged 3-36 months), (b)
             children learning another dialect of English (UK), and (c)
             slightly older children growing up in a different linguistic
             and socio-cultural setting (Tsimane' learners in rural
             Bolivia). We find reasonably high accuracy in some measures
             (AWC, CVC), with more problematic levels of performance in
             others (CTC, precision of male adults and other children).
             Statistical analyses do not support the view that
             performance is worse for children who are dissimilar from
             the LENA® original training set. Whether LENA® results
             are accurate enough for a given research, educational, or
             clinical application depends largely on the specifics at
             hand. We therefore conclude with a set of recommendations to
             help researchers make this determination for their
             goals.},
   Doi = {10.31219/osf.io/mxr8s},
   Key = {fds366580}
}

@article{fds366581,
   Author = {Moore, C and Dailey, S and Garrison, H and Amatuni, A and Bergelson,
             E},
   Title = {Point, Walk, Talk: Links Between Three Early Milestones,
             from Observation and Parental Report},
   Year = {2019},
   Month = {May},
   url = {http://dx.doi.org/10.31234/osf.io/g6q5u},
   Abstract = {Around their first birthdays, infants begin to point,
             walk, and talk. These abilities are appreciable both by
             researchers with strictly standardized criteria and
             caregivers with more relaxed notions of what each of these
             skills entails. Here we compare the onsets of these skills
             and links among them across two data collection methods:
             observation and parental report. We examine pointing,
             walking, and talking in a sample of 44 infants studied
             longitudinally from 6–18 months. In this sample, links
             between pointing and vocabulary were tighter than those
             between walking and vocabulary, supporting a unified
             socio-communicative growth account. Indeed, across several
             cross-sectional and longitudinal analyses, pointers had
             larger vocabularies than their non-pointing peers. In
             contrast to previous work, this did not hold for walkers’
             vs. crawlers’ vocabularies in our sample. Comparing across
             data sources, we find that reported and observed estimates
             of the growing vocabulary and of age of walk onset were
             closely correlated, while agreement between parents and
             researchers on pointing onset and talking onset was weaker.
             Taken together, these results support a developmental
             account in which gesture and language are intertwined
             aspects of early communication and symbolic thinking,
             whereas the shift from crawling to walking appears
             indistinct from age in its relation with language. We
             conclude that pointing, walking, and talking are on similar
             timelines yet distinct from one another, and discuss
             methodological and theoretical implications in the context
             of early development.},
   Doi = {10.31234/osf.io/g6q5u},
   Key = {fds366581}
}

@article{fds366582,
   Author = {Bergelson, E and Casillas, M and Soderstrom, M and Seidl, A and Warlaumont, AS and Amatuni, A},
   Title = {Inside Front Cover: Cover Image, Volume 22, Issue
             1},
   Journal = {Developmental Science},
   Volume = {22},
   Number = {1},
   Pages = {e12785-e12785},
   Publisher = {Wiley},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1111/desc.12785},
   Doi = {10.1111/desc.12785},
   Key = {fds366582}
}

@article{fds347108,
   Author = {Schuller, BW and Batliner, A and Bergler, C and Pokorny, FB and Krajewski, J and Cychosz, M and Vollmann, R and Roelen, SD and Schnieder, S and Bergelson, E and Cristia, A and Seidl, A and Warlaumont, AS and Yankowitz, L and Nöth, E and Amiriparian, S and Hantke, S and Schmitt, M},
   Title = {The INTERSPEECH 2019 computational paralinguistics
             challenge: Styrian dialects, continuous sleepiness, baby
             sounds & Orca activity},
   Journal = {Proceedings of the Annual Conference of the International
             Speech Communication Association, INTERSPEECH},
   Volume = {2019-September},
   Pages = {2378-2382},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.21437/Interspeech.2019-1122},
   Abstract = {The INTERSPEECH 2019 Computational Paralinguistics Challenge
             addresses four different problems for the first time in a
             research competition under well-defined conditions: In the
             Styrian Dialects Sub-Challenge, three types of
             Austrian-German dialects have to be classified; in the
             Continuous Sleepiness Sub-Challenge, the sleepiness of a
             speaker has to be assessed as regression problem; in the
             Baby Sound Sub-Challenge, five types of infant sounds have
             to be classified; and in the Orca Activity Sub-Challenge,
             orca sounds have to be detected. We describe the
             Sub-Challenges and baseline feature extraction and
             classifiers, which include data-learnt (supervised) feature
             representations by the 'usual' ComParE and BoAW features,
             and deep unsupervised representation learning using the
             AUDEEP toolkit.},
   Doi = {10.21437/Interspeech.2019-1122},
   Key = {fds347108}
}

@article{fds355373,
   Author = {Bunce, J and Bergelson, E and Warlaumont, A and Casillas,
             M},
   Title = {Daylong data: Raw audio to transcript via automated &
             manual open-science tools},
   Journal = {Proceedings of the 41st Annual Meeting of the Cognitive
             Science Society: Creativity + Cognition + Computation,
             CogSci 2019},
   Pages = {15-16},
   Publisher = {cognitivesciencesociety.org},
   Editor = {Goel, AK and Seifert, CM and Freksa, C},
   Year = {2019},
   Month = {January},
   ISBN = {9780991196777},
   Abstract = {Several of the central questions in language, social
             cognition, and developmental research focus on the roles of
             input, output, and interaction on learning and
             communication. While it has become easy to collect long-form
             recordings, getting useful data out of them is a more
             daunting task. Across four mini-sessions, this tutorial aims
             to address pre- and post-data collection concerns, and
             provide a hands-on introduction to manual and automated
             annotation techniques. Attendees will leave this tutorial
             with resources and concrete experience for collecting,
             annotating, and sharing/archiving naturalistic recordings,
             including specific open-science practices relevant for these
             data.},
   Key = {fds355373}
}

@article{fds337129,
   Author = {Bergelson, E and Amatuni, A and Dailey, S and Koorathota, S and Tor,
             S},
   Title = {Day by day, hour by hour: Naturalistic language input to
             infants.},
   Journal = {Developmental science},
   Volume = {22},
   Number = {1},
   Pages = {e12715},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1111/desc.12715},
   Abstract = {Measurements of infants' quotidian experiences provide
             critical information about early development. However, the
             role of sampling methods in providing these measurements is
             rarely examined. Here we directly compare language input
             from hour-long video-recordings and daylong audio-recordings
             within the same group of 44 infants at 6 and 7 months. We
             compared 12 measures of language quantity and lexical
             diversity, talker variability, utterance-type, and object
             presence, finding moderate correlations across
             recording-types. However, video-recordings generally
             featured far denser noun input across these measures
             compared to the daylong audio-recordings, more akin to
             'peak' audio hours (though not as high in talkers and
             word-types). Although audio-recordings captured ~10 times
             more awake-time than videos, the noun input in them was only
             2-4 times greater. Notably, whether we compared videos to
             daylong audio-recordings or peak audio times, videos
             featured relatively fewer declaratives and more questions;
             furthermore, the most common video-recorded nouns were less
             consistent across families than the top audio-recording
             nouns were. Thus, hour-long videos and daylong
             audio-recordings revealed fairly divergent pictures of the
             language infants hear and learn from in their daily lives.
             We suggest that short video-recordings provide a dense and
             somewhat different sample of infants' language experiences,
             rather than a typical one, and should be used cautiously for
             extrapolation about common words, talkers, utterance-types,
             and contexts at larger timescales. If theories of language
             development are to be held accountable to 'facts on the
             ground' from observational data, greater care is needed to
             unpack the ramifications of sampling methods of early
             language input.},
   Doi = {10.1111/desc.12715},
   Key = {fds337129}
}

@article{fds338532,
   Author = {Laing, CE and Bergelson, E},
   Title = {Mothers' Work Status and 17-month-olds' Productive
             Vocabulary.},
   Journal = {Infancy : the official journal of the International Society
             on Infant Studies},
   Volume = {24},
   Number = {1},
   Pages = {101-109},
   Publisher = {WILEY},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1111/infa.12265},
   Abstract = {Literature examining the effects of mothers' work status on
             infant language development is mixed, with little focus on
             varying work-schedules and early vocabulary. We use
             naturalistic data to analyze the productive vocabulary of 44
             17-month-olds in relation to mothers' work status
             (Full-time, Part-time, Stay-at-home) at 6 and 18 months.
             Infants who experienced a combination of care from mothers
             and other caretakers had larger productive vocabularies than
             infants in solely full-time maternal or solely
             other-caretaker care. Our results draw from naturalistic
             data to suggest that this care combination may be
             particularly beneficial for early lexical
             development.},
   Doi = {10.1111/infa.12265},
   Key = {fds338532}
}

@article{fds339362,
   Author = {Bergelson, E and Casillas, M and Soderstrom, M and Seidl, A and Warlaumont, AS and Amatuni, A},
   Title = {What Do North American Babies Hear? A large-scale
             cross-corpus analysis.},
   Journal = {Developmental science},
   Volume = {22},
   Number = {1},
   Pages = {e12724},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1111/desc.12724},
   Abstract = {A range of demographic variables influences how much speech
             young children hear. However, because studies have used
             vastly different sampling methods, quantitative comparison
             of interlocking demographic effects has been nearly
             impossible, across or within studies. We harnessed a unique
             collection of existing naturalistic, day-long recordings
             from 61 homes across four North American cities to examine
             language input as a function of age, gender, and maternal
             education. We analyzed adult speech heard by 3- to
             20-month-olds who wore audio recorders for an entire day. We
             annotated speaker gender and speech register (child-directed
             or adult-directed) for 10,861 utterances from female and
             male adults in these recordings. Examining age, gender, and
             maternal education collectively in this ecologically valid
             dataset, we find several key results. First, the speaker
             gender imbalance in the input is striking: children heard
             2-3× more speech from females than males. Second, children
             in higher-maternal education homes heard more child-directed
             speech than those in lower-maternal education homes.
             Finally, our analyses revealed a previously unreported
             effect: the proportion of child-directed speech in the input
             increases with age, due to a decrease in adult-directed
             speech with age. This large-scale analysis is an important
             step forward in collectively examining demographic variables
             that influence early development, made possible by pooled,
             comparable, day-long recordings of children's language
             environments. The audio recordings, annotations, and
             annotation software are readily available for reuse and
             reanalysis by other researchers.},
   Doi = {10.1111/desc.12724},
   Key = {fds339362}
}
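
%% The register-by-age result above comes down to tabulating the
%% proportion of child-directed speech (CDS) per age. A toy version of
%% that tabulation, over invented utterance-level annotations:

# Sketch: proportion of CDS among adult utterances, by child age.
import pandas as pd

utts = pd.DataFrame({
    "age_months": [3, 3, 3, 12, 12, 12, 20, 20, 20],
    "register":   ["ADS", "CDS", "ADS", "CDS", "ADS", "CDS",
                   "CDS", "CDS", "ADS"],
})
prop_cds = (utts.assign(is_cds=utts.register.eq("CDS"))
                .groupby("age_months")["is_cds"].mean())
print(prop_cds)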

@article{fds357505,
   Author = {Cychosz, M and Cristia, A and Bergelson, E and Casillas, M and Baudet,
             G and Warlaumont, A and Scaff, C and Yankowitz, L and Seidl,
             A},
   Title = {Vocal development in a large-scale crosslinguistic
             corpus},
   Year = {2019},
   url = {http://dx.doi.org/10.31234/osf.io/9vzs5},
   Abstract = {This study evaluates whether early vocalizations develop
             in similar ways in children across diverse cultural
             contexts. We analyze data from daylong audio-recordings of
             49 children (1-36 months) from five different
             language/cultural backgrounds. Citizen scientists annotated
             these recordings to determine if child vocalizations
             contained canonical transitions or not (e.g., "ba" versus
             "ee"). Results revealed that the proportion of clips
             reported to contain canonical transitions increased with
             age. Further, this proportion exceeded 0.15 by around 7
             months, replicating and extending previous findings on
             canonical vocalization development but using data from the
             natural environments of a culturally and linguistically
             diverse sample. This work explores how crowdsourcing can be
             used to annotate corpora, helping establish developmental
             milestones relevant to multiple languages and cultures.
             Lower inter-annotator reliability on the crowdsourcing
             platform, relative to more traditional in-lab expert
             annotators, means that a larger number of unique annotators
             and/or annotations are required and that crowdsourcing may
             not be a suitable method for more fine-grained annotation
             decisions. Audio clips used for this project are compiled
             into a large-scale infant vocal corpus that is available for
             other researchers to use in future work.},
   Doi = {10.31234/osf.io/9vzs5},
   Key = {fds357505}
}
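
%% A minimal sketch (not the authors' code) of the kind of analysis the
%% abstract above describes: aggregating crowdsourced clip annotations by
%% age to find where the proportion of canonical-transition clips first
%% exceeds 0.15. The file and column names are hypothetical.
import pandas as pd

# Hypothetical table: one row per annotated clip, with the child's age in
# months and a 0/1 majority-vote judgment of canonical transitions.
ann = pd.read_csv("clip_annotations.csv")

# Proportion of clips judged canonical within one-month age bins.
prop = ann.groupby(ann["age_months"].round())["is_canonical"].mean()

# Earliest age bin at which the proportion exceeds the 0.15 threshold.
crossing = prop[prop > 0.15].index.min()
print(f"Canonical proportion first exceeds 0.15 at ~{crossing:.0f} months")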

@article{fds357506,
   Author = {Räsänen, O and Seshadri, S and Karadayi, J and Riebling, E and Bunce,
             J and Cristia, A and Metze, F and Casillas, M and Rosemberg, CR and Bergelson, E and Soderstrom, M},
   Title = {Automatic word count estimation from daylong child-centered
             recordings in various language environments using
             language-independent syllabification of speech},
   Year = {2019},
   url = {http://dx.doi.org/10.31234/osf.io/xp6k2},
   Abstract = {Automatic word count estimation (WCE) from audio
             recordings can be used to quantify the amount of verbal
             communication in a recording environment. One key
             application of WCE is to measure language input heard by
             infants and toddlers in their natural environments, as
             captured by daylong recordings from microphones worn by the
             infants. Although WCE is nearly trivial for high-quality
             signals in high-resource languages, daylong recordings are
             substantially more challenging due to the unconstrained
             acoustic environments and the presence of near- and
             far-field speech. Moreover, many use cases of interest
             involve languages for which reliable ASR systems or even
             well-defined lexicons are not available. A good WCE system
             should also perform similarly for low- and high-resource
             languages in order to enable unbiased comparisons across
             different cultures and environments. Unfortunately, the
             current state-of-the-art solution, the LENA system, is
             based on proprietary software and has only been optimized
             for American English, limiting its applicability. In this
             paper, we build on existing work on WCE and present the
             steps we have taken towards a freely available system for
             WCE that can be adapted to different languages or dialects
             with a limited amount of orthographically transcribed speech
             data. Our system is based on language-independent
             syllabification of speech, followed by a language-dependent
             mapping from syllable counts (and a number of other acoustic
             features) to the corresponding word count estimates. We
             evaluate our system on samples from daylong infant
             recordings from six different corpora consisting of several
             languages and socioeconomic environments, all manually
             annotated with the same protocol to allow direct comparison.
             We compare a number of alternative techniques for the two
             key components in our system: speech activity detection and
             automatic syllabification of speech. As a result, we show
             that our system can reach relatively consistent WCE accuracy
             across multiple corpora and languages (with some
             limitations). In addition, the system outperforms LENA on
             three of the four corpora consisting of different varieties
             of English. We also demonstrate how an automatic neural
             network-based syllabifier, when trained on multiple
             languages, generalizes well to novel languages beyond the
             training data, outperforming two previously proposed
             unsupervised syllabifiers as a feature extractor for
             WCE.},
   Doi = {10.31234/osf.io/xp6k2},
   Key = {fds357506}
}
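
%% A minimal sketch of the two-stage mapping the abstract above describes,
%% assuming hypothetical calibration data: language-independent syllable
%% counts (plus another acoustic feature) are mapped to word counts via a
%% language-dependent linear model fit on a small transcribed sample.
import numpy as np
from sklearn.linear_model import LinearRegression

# Hypothetical calibration utterances: [syllable count, duration in s],
# with word counts taken from the orthographic transcriptions.
X_cal = np.array([[12, 3.1], [5, 1.4], [20, 5.0], [8, 2.2]])
y_cal = np.array([9, 4, 15, 6])

mapper = LinearRegression().fit(X_cal, y_cal)  # language-dependent mapping

# Estimate word counts for new, untranscribed utterances.
X_new = np.array([[10, 2.8], [14, 3.6]])
print(mapper.predict(X_new).round())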

@article{fds338184,
   Author = {Ryant, N and Bergelson, E and Church, K and Cristia, A and Du, J and Ganapathy, S and Khudanpur, S and Kowalski, D and Krishnamoorthy, M and Kulshreshta, R and Liberman, M and Lu, YD and Maciejewski, M and Metze,
             F and Profant, J and Sun, L and Tsao, Y and Yu, Z},
   Title = {Enhancement and analysis of conversational speech: JSALT
             2017},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Volume = {2018-April},
   Pages = {5154-5158},
   Publisher = {IEEE},
   Year = {2018},
   Month = {September},
   ISBN = {9781538646588},
   url = {http://dx.doi.org/10.1109/ICASSP.2018.8462468},
   Abstract = {Automatic speech recognition is increasingly widely and
              effectively used. Nevertheless, in some automatic speech
              analysis tasks the state of the art remains surprisingly poor.
             One of these is 'diarization', the task of determining who
             spoke when. Diarization is key to processing meeting audio
             and clinical interviews, extended recordings such as police
             body cam or child language acquisition data, and any other
             speech data involving multiple speakers whose voices are not
             cleanly separated into individual channels. Overlapping
             speech, environmental noise and suboptimal recording
             techniques make the problem harder. During the JSALT Summer
             Workshop at CMU in 2017, an international team of
             researchers worked on several aspects of this problem,
             including calibration of the state of the art, detection of
             overlaps, enhancement of noisy recordings, and
             classification of shorter speech segments. This paper
             sketches the workshop's results, and announces plans for a
             'Diarization Challenge' to encourage further
             progress.},
   Doi = {10.1109/ICASSP.2018.8462468},
   Key = {fds338184}
}
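
%% Diarization systems of the kind discussed above are conventionally
%% scored with the diarization error rate (DER); a minimal sketch with
%% illustrative numbers (not results from the paper):
def diarization_error_rate(missed, false_alarm, confusion, total_speech):
    """DER = (missed speech + false alarm + speaker confusion) /
    total reference speech time, all in seconds."""
    return (missed + false_alarm + confusion) / total_speech

# e.g., 30 s missed, 12 s false alarm, 45 s confusion over 600 s of speech
print(f"DER = {diarization_error_rate(30.0, 12.0, 45.0, 600.0):.1%}")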

@article{fds327239,
   Author = {Bergelson, E and Swingley, D},
   Title = {Young Infants' Word Comprehension Given An Unfamiliar Talker
             or Altered Pronunciations.},
   Journal = {Child development},
   Volume = {89},
   Number = {5},
   Pages = {1567-1576},
   Year = {2018},
   Month = {September},
   url = {http://dx.doi.org/10.1111/cdev.12888},
   Abstract = {To understand spoken words, listeners must appropriately
             interpret co-occurring talker characteristics and speech
             sound content. This ability was tested in 6- to
              14-month-olds by measuring their looking to named food and
             body part images. In the new talker condition (n = 90),
             pictures were named by an unfamiliar voice; in the
             mispronunciation condition (n = 98), infants' mothers
             "mispronounced" the words (e.g., nazz for nose). Six- to
             7-month-olds fixated target images above chance across
              conditions, understanding novel talkers and mothers'
              phonologically deviant speech equally. Eleven- to
              14-month-olds also understood new talkers, but performed
             poorly with mispronounced speech, indicating sensitivity to
             phonological deviation. Between these ages, performance was
             mixed. These findings highlight the changing roles of
             acoustic and phonetic variability in early word
             comprehension, as infants learn which variations alter
             meaning.},
   Doi = {10.1111/cdev.12888},
   Key = {fds327239}
}

@article{fds333673,
   Author = {Amatuni, A and He, E and Bergelson, E},
   Title = {Preserved Structure Across Vector Space Representations},
   Volume = {abs/1802.00840},
   Year = {2018},
   Month = {February},
   Abstract = {Certain concepts, words, and images are intuitively more
             similar than others (dog vs. cat, dog vs. spoon), though
             quantifying such similarity is notoriously difficult.
             Indeed, this kind of computation is likely a critical part
             of learning the category boundaries for words within a given
             language. Here, we use a set of 27 items (e.g. 'dog') that
             are highly common in infants' input, and use both image- and
             word-based algorithms to independently compute similarity
             among them. We find three key results. First, the pairwise
             item similarities derived within image-space and word-space
             are correlated, suggesting preserved structure among these
             extremely different representational formats. Second, the
             closest 'neighbors' for each item, within each space, showed
             significant overlap (e.g. both found 'egg' as a neighbor of
             'apple'). Third, items with the most overlapping neighbors
             are later-learned by infants and toddlers. We conclude that
             this approach, which does not rely on human ratings of
             similarity, may nevertheless reflect stable within-class
             structure across these two spaces. We speculate that such
             invariance might aid lexical acquisition, by serving as an
             informative marker of category boundaries.},
   Key = {fds333673}
}

@article{fds366583,
   Author = {Amatuni, A and He, E and Bergelson, E},
   Title = {Preserved Structure Across Vector Space Representations},
   Journal = {Proceedings of the 40th Annual Meeting of the Cognitive
             Science Society, CogSci 2018},
   Pages = {1298-1303},
   Publisher = {cognitivesciencesociety.org},
   Editor = {Kalish, C and Rau, MA and Zhu, XJ and Rogers, TT},
   Year = {2018},
   Month = {January},
   ISBN = {9780991196784},
   Abstract = {Certain concepts, words, and images are intuitively more
             similar than others (dog vs. cat, dog vs. spoon), though
             quantifying such similarity is notoriously difficult.
             Indeed, this kind of computation is likely a critical part
             of learning the category boundaries for words within a given
             language. Here, we use a set of 27 items (e.g. 'dog') that
             are highly common in infants' input, and use both image- and
             word-based algorithms to independently compute similarity
             among them. We find three key results. First, the pairwise
             item similarities derived within image-space and word-space
             are correlated, suggesting preserved structure among these
             extremely different representational formats. Second, the
             closest 'neighbors' for each item, within each space, showed
             significant overlap (e.g. both found 'egg' as a neighbor of
             'apple'). Third, items with the most overlapping neighbors
             are later-learned by infants and toddlers. We conclude that
             this approach, which does not rely on human ratings of
             similarity, may nevertheless reflect stable within-class
             structure across these two spaces. We speculate that such
             invariance might aid lexical acquisition, by serving as an
             informative marker of category boundaries.},
   Key = {fds366583}
}
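
%% A minimal sketch of the cross-space comparison described above, with
%% random stand-ins for the word vectors (e.g., GloVe) and image features;
%% it correlates the two pairwise-similarity matrices and checks
%% nearest-neighbor overlap for one item.
import numpy as np
from scipy.stats import spearmanr

rng = np.random.default_rng(0)
n_items = 27                                 # e.g., 27 common nouns
word_vecs = rng.normal(size=(n_items, 300))  # stand-in word embeddings
img_vecs = rng.normal(size=(n_items, 512))   # stand-in image features

def cosine_sim(X):
    Xn = X / np.linalg.norm(X, axis=1, keepdims=True)
    return Xn @ Xn.T

sim_w, sim_i = cosine_sim(word_vecs), cosine_sim(img_vecs)

# Correlate the upper triangles of the two similarity matrices.
iu = np.triu_indices(n_items, k=1)
rho, p = spearmanr(sim_w[iu], sim_i[iu])
print(f"cross-space similarity correlation: rho={rho:.2f} (p={p:.3f})")

# Top-5 neighbor overlap for item 0 (index 0 is the item itself, so skip it).
nn_w = set(np.argsort(-sim_w[0])[1:6])
nn_i = set(np.argsort(-sim_i[0])[1:6])
print(f"overlapping neighbors of item 0: {nn_w & nn_i}")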

@article{fds330846,
   Author = {Bergelson, E and Aslin, RN},
   Title = {Nature and origins of the lexicon in 6-mo-olds.},
   Journal = {Proceedings of the National Academy of Sciences of the
             United States of America},
   Volume = {114},
   Number = {49},
   Pages = {12916-12921},
   Year = {2017},
   Month = {December},
   url = {http://dx.doi.org/10.1073/pnas.1712966114},
   Abstract = {Recent research reported the surprising finding that even
             6-mo-olds understand common nouns [Bergelson E, Swingley D
             (2012) Proc Natl Acad Sci USA 109:3253-3258].
             However, is their early lexicon structured and acquired like
             that of older learners? We test 6-mo-olds for a hallmark of the
             mature lexicon: cross-word relations. We also examine
             whether properties of the home environment that have been
             linked with lexical knowledge in older children are
             detectable in the initial stage of comprehension. We use a
             new dataset, which includes in-lab comprehension and home
             measures from the same infants. We find evidence for
             cross-word structure: On seeing two images of common nouns,
             infants looked significantly more at named target images
             when the competitor images were semantically unrelated
             (e.g., milk and foot) than when they were related (e.g.,
             milk and juice), just as older learners do. We further find
             initial evidence for home-lab links: common noun
             "copresence" (i.e., whether words' referents were present
             and attended to in home recordings) correlated with in-lab
             comprehension. These findings suggest that, even in neophyte
             word learners, cross-word relations are formed early and the
             home learning environment measurably helps shape the lexicon
             from the outset.},
   Doi = {10.1073/pnas.1712966114},
   Key = {fds330846}
}

@article{fds325486,
   Author = {Frank, MC and Bergelson, E and Bergmann, C and Cristia, A and Floccia,
             C and Gervain, J and Hamlin, JK and Hannon, EE and Kline, M and Levelt, C and Lew-Williams, C and Nazzi, T and Panneton, R and Rabagliati, H and Soderstrom, M and Sullivan, J and Waxman, S and Yurovsky,
             D},
   Title = {A Collaborative Approach to Infant Research: Promoting
             Reproducibility, Best Practices, and Theory-Building.},
   Journal = {Infancy : the official journal of the International Society
             on Infant Studies},
   Volume = {22},
   Number = {4},
   Pages = {421-435},
   Publisher = {WILEY},
   Year = {2017},
   Month = {July},
   url = {http://dx.doi.org/10.1111/infa.12182},
   Abstract = {The ideal of scientific progress is that we accumulate
             measurements and integrate these into theory, but recent
             discussion of replicability issues has cast doubt on whether
             psychological research conforms to this model. Developmental
             research-especially with infant participants-also has
             discipline-specific replicability challenges, including
             small samples and limited measurement methods. Inspired by
             collaborative replication efforts in cognitive and social
             psychology, we describe a proposal for assessing and
             promoting replicability in infancy research: large-scale,
             multi-laboratory replication efforts aiming for a more
             precise understanding of key developmental phenomena. The
             ManyBabies project, our instantiation of this proposal, will
             not only help us estimate how robust and replicable these
             phenomena are, but also gain new theoretical insights into
             how they vary across ages, linguistic communities, and
             measurement methods. This project has the potential for a
             variety of positive outcomes, including less-biased
             estimates of theoretically important effects, estimates of
             variability that can be used for later study planning, and a
             series of best-practices blueprints for future infancy
             research.},
   Doi = {10.1111/infa.12182},
   Key = {fds325486}
}

@article{fds366584,
   Author = {Frank, MC and Bergmann, C and Bergelson, E and Byers-Heinlein, K and Cristia, A and Cusack, R and Dyck, K and floccia, C and Gervain, J and Gonzalez, N and Hamlin, K and Hannon, E and Kellier, D and Kline Struhl,
             M and Lew-Williams, C and Nazzi, T and Panneton, R and Rabagliati, H and Rennels, J and Seidl, A and Yurovsky, D and Soderstrom,
             M},
   Title = {Quantifying sources of variability in infancy research using
             the infant-directed speech preference},
   Year = {2017},
   Month = {April},
   url = {http://dx.doi.org/10.31234/osf.io/s98ab},
   Abstract = {The field of psychology has become increasingly concerned
             with issues related to methodology and replicability.
             Infancy researchers face specific challenges related to
             replicability: high-powered studies are difficult to
             conduct, testing conditions vary across labs, and different
             labs have access to different infant populations, amongst
             other factors. Addressing these concerns, we report on a
             large-scale, multi-site study aimed at 1) assessing the
             overall replicability of a single theoretically-important
             phenomenon and 2) examining methodological, situational,
             cultural, and developmental moderators. We focus on
             infants’ preference for infant-directed speech (IDS) over
             adult-directed speech (ADS). Stimuli of mothers speaking to
             their infants and to an adult were created using
             semi-naturalistic laboratory-based audio recordings in North
             American English. Infants’ relative preference for IDS and
             ADS was assessed across 67 laboratories in North America,
             Europe, Australia, and Asia using the three commonly-used
             infant discrimination methods (head-turn preference, central
             fixation, and eye tracking). The overall meta-analytic
              effect size (Cohen’s d) was 0.35 [0.29 - 0.42], which
             was reliably above zero but smaller than the meta-analytic
             mean computed from previous literature (0.67). The IDS
             preference was significantly stronger in older children, in
             those children for whom the stimuli matched their native
             language and dialect, and in data from labs using the
             head-turn preference procedure. Together these findings
             replicate the infant-directed speech preference but suggest
             that its magnitude is modulated by development, native
              language experience, and testing procedure.},
   Doi = {10.31234/osf.io/s98ab},
   Key = {fds366584}
}

@article{fds332031,
   Author = {Casillas, M and Amatuni, A and Seidl, A and Soderstrom, M and Warlaumont, AS and Bergelson, E},
   Title = {What do babies hear? Analyses of child- and adult-directed
             speech},
   Journal = {Proceedings of the Annual Conference of the International
             Speech Communication Association, INTERSPEECH},
   Volume = {2017-August},
   Pages = {2093-2097},
   Publisher = {ISCA},
   Year = {2017},
   Month = {January},
   url = {http://dx.doi.org/10.21437/Interspeech.2017-1409},
   Abstract = {Child-directed speech is argued to facilitate language
             development, and is found cross-linguistically and
             cross-culturally to varying degrees. However, previous
             research has generally focused on short samples of
             child-caregiver interaction, often in the lab or with
             experimenters present. We test the generalizability of this
             phenomenon with an initial descriptive analysis of the
             speech heard by young children in a large, unique collection
             of naturalistic, daylong home recordings. Trained annotators
             coded automatically-detected adult speech 'utterances' from
             61 homes across 4 North American cities, gathered from
             children (age 2-24 months) wearing audio recorders during a
             typical day. Coders marked the speaker gender (male/female)
             and intended addressee (child/adult), yielding 10,886
             addressee and gender tags from 2,523 minutes of audio (cf.
             HB-CHAAC Interspeech ComParE challenge; Schuller et al., in
             press). Automated speaker-diarization (LENA) incorrectly
             gender-tagged 30% of male adult utterances, compared to
             manually-coded consensus. Furthermore, we find effects of
             SES and gender on child-directed and overall speech, an
             increase in child-directed speech with child age, and
             interactions of speaker gender, child gender, and child age:
             female caretakers increased their child-directed speech more
             with age than male caretakers did, but only for male
             infants. Implications for language acquisition and existing
             classification algorithms are discussed.},
   Doi = {10.21437/Interspeech.2017-1409},
   Key = {fds332031}
}

@article{fds332032,
   Author = {Casillas, M and Bergelson, E and Warlaumont, AS and Cristia, A and Soderstrom, M and VanDam, M and Sloetjes, H},
   Title = {A new workflow for semi-automatized annotations: Tests with
             long-form naturalistic recordings of children's language
             environments},
   Journal = {Proceedings of the Annual Conference of the International
             Speech Communication Association, INTERSPEECH},
   Volume = {2017-August},
   Pages = {2098-2102},
   Publisher = {ISCA},
   Year = {2017},
   Month = {January},
   url = {http://dx.doi.org/10.21437/Interspeech.2017-1418},
   Abstract = {Interoperable annotation formats are fundamental to the
             utility, expansion, and sustainability of collective data
             repositories. In language development research, shared
             annotation schemes have been critical to facilitating the
             transition from raw acoustic data to searchable, structured
             corpora. Current schemes typically require comprehensive and
             manual annotation of utterance boundaries and orthographic
             speech content, with an additional, optional range of tags
             of interest. These schemes have been enormously successful
             for datasets on the scale of dozens of recording hours but
             are untenable for long-format recording corpora, which
             routinely contain hundreds to thousands of audio hours.
             Long-format corpora would benefit greatly from
             (semi-)automated analyses, both on the earliest steps of
             annotation (voice activity detection, utterance segmentation,
             and speaker diarization) as well as later steps, e.g.,
             classification-based codes such as child- vs. adult-directed
             speech, and speech recognition to produce phonetic/
             orthographic representations. We present an annotation
             workflow specifically designed for long-format corpora which
             can be tailored by individual researchers and which
             interfaces with the current dominant scheme for short-format
             recordings. The workflow allows semi-automated annotation
             and analyses at higher linguistic levels. We give one
             example of how the workflow has been successfully
             implemented in a large cross-database project. Keywords:
             daylong recordings, language acquisition, annotation, speech
             recognition, speaker diarization.},
   Doi = {10.21437/Interspeech.2017-1418},
   Key = {fds332032}
}

@article{fds332033,
   Author = {Warlaumont, AS and VanDam, M and Bergelson, E and Cristia,
             A},
   Title = {HomeBank: A repository for long-form real-world audio
             recordings of children},
   Journal = {Proceedings of the Annual Conference of the International
             Speech Communication Association, INTERSPEECH},
   Volume = {2017-August},
   Pages = {815-816},
   Year = {2017},
   Month = {January},
   url = {http://dx.doi.org/10.21437/Interspeech.2017-2051},
   Abstract = {HomeBank is a new component of the
             TalkBank system, focused on long-form (i.e., multi-hour,
             typically daylong) real-world recordings of children's
             language experiences, and it is linked to a GitHub
             repository in which tools for analyzing those recordings can
             be shared. HomeBank constitutes not only a rich resource for
             researchers interested in early language acquisition
             specifically, but also for those seeking to study
             spontaneous speech, media exposure, and audio environments
             more generally. This Show and Tell describes the procedures
             for accessing and contributing HomeBank data and code. It
             also overviews the current contents of the repositories, and
             provides some examples of audio recordings, available
             transcriptions, and currently available analysis
             tools.},
   Doi = {10.21437/Interspeech.2017-2051},
   Key = {fds332033}
}

@article{fds332034,
   Author = {Schuller, B and Steidl, S and Batliner, A and Bergelson, E and Krajewski, J and Janott, C and Amatuni, A and Casillas, M and Seidl, A and Soderstrom, M and Warlaumont, AS and Hidalgo, G and Schnieder, S and Heiser, C and Hohenhorst, W and Herzog, M and Schmitt, M and Qian, K and Zhang, Y and Trigeorgis, G and Tzirakis, P and Zafeiriou,
             S},
   Title = {The INTERSPEECH 2017 Computational Paralinguistics
             Challenge: Addressee, Cold & Snoring},
   Journal = {Proceedings of the Annual Conference of the International
             Speech Communication Association, INTERSPEECH},
   Volume = {2017-August},
   Pages = {3442-3446},
   Publisher = {ISCA},
   Year = {2017},
   Month = {January},
   url = {http://dx.doi.org/10.21437/Interspeech.2017-43},
   Abstract = {The INTERSPEECH 2017 Computational Paralinguistics Challenge
             addresses three different problems for the first time in
             research competition under well-defined conditions: In the
             Addressee sub-challenge, it has to be determined whether
             speech produced by an adult is directed towards another
             adult or towards a child; in the Cold sub-challenge, speech
             under cold has to be told apart from 'healthy' speech; and
             in the Snoring sub-challenge, four different types of
             snoring have to be classified. In this paper, we describe
             these sub-challenges, their conditions, and the baseline
             feature extraction and classifiers, which include
             data-learnt feature representations by end-to-end learning
             with convolutional and recurrent neural networks, and
             bag-of-audio-words for the first time in the challenge
             series.},
   Doi = {10.21437/Interspeech.2017-43},
   Key = {fds332034}
}

@article{fds376462,
   Author = {Amatuni, A and Bergelson, E},
   Title = {Semantic Networks Generated from Early Linguistic
             Input},
   Journal = {CogSci 2017 - Proceedings of the 39th Annual Meeting of the
             Cognitive Science Society: Computational Foundations of
             Cognition},
   Pages = {1538-1543},
   Year = {2017},
   Month = {January},
   ISBN = {9780991196760},
   Abstract = {Semantic networks generated from different word corpora show
             common structural characteristics, including high degrees of
             clustering, short average path lengths, and scale free
             degree distributions. Previous research has disagreed about
             whether these features emerge from internally- or
             externally-driven properties (i.e. words already in the
             lexicon vs. regularities in the external world), mapping
             onto preferential attachment and preferential acquisition
             accounts, respectively (Steyvers & Tenenbaum, 2005; Hills,
             Maouene, Maouene, Sheya, & Smith, 2009). Such accounts
             suggest that inherent semantic structure shapes new lexical
             growth. Here we extend previous work by creating semantic
             networks using the SEEDLingS corpus, a newly collected
             corpus of linguistic input to infants. Using a recently
             developed LSA-like approach (GloVe vectors), we confirm the
             presence of previously reported structural characteristics,
             but only in certain ranges of semantic similarity space. Our
             results confirm the robustness of certain aspects of network
             organization, and provide novel evidence in support of
             preferential acquisition accounts.},
   Key = {fds376462}
}
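
%% A minimal sketch of the network-construction step described above (not
%% the authors' pipeline): link words whose embedding cosine similarity
%% exceeds a threshold, then compute the structural measures mentioned in
%% the abstract. Random vectors stand in for real GloVe embeddings.
import numpy as np
import networkx as nx

rng = np.random.default_rng(1)
words = [f"word{i}" for i in range(50)]    # stand-in vocabulary
vecs = rng.normal(size=(len(words), 100))  # stand-in GloVe vectors
vecs /= np.linalg.norm(vecs, axis=1, keepdims=True)
sim = vecs @ vecs.T

G = nx.Graph()
G.add_nodes_from(words)
threshold = 0.15                           # similarity cutoff (tunable)
for i in range(len(words)):
    for j in range(i + 1, len(words)):
        if sim[i, j] > threshold:
            G.add_edge(words[i], words[j])

print("average clustering:", nx.average_clustering(G))
giant = G.subgraph(max(nx.connected_components(G), key=len))
print("average path length (giant component):",
      nx.average_shortest_path_length(giant))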

@article{fds327381,
   Author = {Bergelson, E and Aslin, R},
   Title = {Semantic Specificity in One-Year-Olds' Word
             Comprehension.},
   Journal = {Language learning and development : the official journal of
             the Society for Language Development},
   Volume = {13},
   Number = {4},
   Pages = {481-501},
   Year = {2017},
   Month = {January},
   url = {http://dx.doi.org/10.1080/15475441.2017.1324308},
   Abstract = {The present study investigated infants' knowledge about
             familiar nouns. Infants (n = 46, 12-20-month-olds) saw
             two-image displays of familiar objects, or one familiar and
             one novel object. Infants heard either a matching word (e.g.
             "foot' when seeing foot and juice), a related word (e.g.
             "sock" when seeing foot and juice) or a nonce word (e.g.
             "fep" when seeing a novel object and dog). Across the whole
             sample, infants reliably fixated the referent on matching
             and nonce trials. On the critical related trials we found
             increasingly less looking to the incorrect (but related)
             image with age. These results suggest that one-year-olds
             look at familiar objects both when they hear them labeled
             and when they hear related labels, to similar degrees, but
             over the second year increasingly rely on semantic fit. We
             suggest that infants' initial semantic representations are
             imprecise, and continue to sharpen over the second postnatal
             year.},
   Doi = {10.1080/15475441.2017.1324308},
   Key = {fds327381}
}

@article{fds333674,
   Author = {Amatuni, A and Bergelson, E},
   Title = {Semantic Networks Generated from Early Linguistic
             Input},
   Pages = {1538-1543},
   Publisher = {Cold Spring Harbor Laboratory},
   Editor = {Gunzelmann, G and Howes, A and Tenbrink, T and Davelaar,
             EJ},
   Year = {2017},
   ISBN = {9780991196760},
   url = {http://dx.doi.org/10.1101/157701},
   Abstract = {Semantic networks generated from different word corpora show
             common structural characteristics, including high degrees of
             clustering, short average path lengths, and scale free
             degree distributions. Previous research has disagreed about
             whether these features emerge from internally or externally
             driven properties (i.e. words already in the lexicon vs.
             regularities in the external world), mapping onto
             preferential attachment and preferential acquisition
             accounts, respectively (Steyvers & Tenenbaum, 2005; Hills,
             Maouene, Maouene, Sheya, & Smith, 2009). Such accounts
             suggest that inherent semantic structure shapes new lexical
             growth. Here we extend previous work by creating semantic
             networks using the SEEDLingS corpus, a newly collected
             corpus of linguistic input to infants. Using a recently
             developed LSA-like approach (GloVe vectors), we confirm the
             presence of previously reported structural characteristics,
             but only in certain ranges of semantic similarity space. Our
             results confirm the robustness of certain aspects of network
             organization, and provide novel evidence in support of
             preferential acquisition accounts.},
   Doi = {10.1101/157701},
   Key = {fds333674}
}

@article{fds333675,
   Author = {Bergelson, E and Amatuni, A and Casillas, M and Seidl, A and Soderstrom,
             M and Warlaumont, AS},
   Title = {Description of the Homebank Child/Adult Addressee Corpus
             (HB-CHAAC).},
   Journal = {INTERSPEECH},
   Publisher = {ISCA},
   Editor = {Lacerda, F},
   Year = {2017},
   Key = {fds333675}
}

@article{fds333676,
   Author = {Laing, C and Bergelson, E},
   Title = {More Siblings Means Lower Input Quality in Early Language
             Development.},
   Journal = {CogSci},
   Publisher = {cognitivesciencesociety.org},
   Editor = {Gunzelmann, G and Howes, A and Tenbrink, T and Davelaar,
             EJ},
   Year = {2017},
   ISBN = {978-0-9911967-6-0},
   Key = {fds333676}
}

@article{fds318667,
   Author = {VanDam, M and Warlaumont, AS and Bergelson, E and Cristia, A and Soderstrom, M and De Palma, P and MacWhinney, B},
   Title = {HomeBank: An Online Repository of Daylong Child-Centered
             Audio Recordings.},
   Journal = {Seminars in speech and language},
   Volume = {37},
   Number = {2},
   Pages = {128-142},
   Year = {2016},
   Month = {May},
   url = {http://dx.doi.org/10.1055/s-0036-1580745},
   Abstract = {HomeBank is introduced here. It is a public, permanent,
             extensible, online database of daylong audio recorded in
             naturalistic environments. HomeBank serves two primary
             purposes. First, it is a repository for raw audio and
             associated files: one database requires special permissions,
             and another redacted database allows unrestricted public
             access. Associated files include metadata such as
             participant demographics and clinical diagnostics, automated
             annotations, and human-generated transcriptions and
             annotations. Many recordings use the child-perspective LENA
             recorders (LENA Research Foundation, Boulder, Colorado,
             United States), but various recordings and metadata can be
             accommodated. The HomeBank database can have both vetted and
             unvetted recordings, with different levels of accessibility.
             Additionally, HomeBank is an open repository for processing
             and analysis tools for HomeBank or similar data sets.
             HomeBank is flexible for users and contributors, making
             primary data available to researchers, especially those in
             child development, linguistics, and audio engineering.
             HomeBank facilitates researchers' access to large-scale data
             and tools, linking the acoustic, auditory, and linguistic
             characteristics of children's environments with a variety of
             variables including socioeconomic status, family
             characteristics, language trajectories, and disorders.
             Automated processing applied to daylong home audio
             recordings is now becoming widely used in early intervention
             initiatives, helping parents to provide richer speech input
             to at-risk children.},
   Doi = {10.1055/s-0036-1580745},
   Key = {fds318667}
}

@article{fds318668,
   Author = {Metze, F and Riebling, E and Warlaumont, AS and Bergelson,
             E},
   Title = {Virtual machines and containers as a platform for
             experimentation},
   Journal = {Proceedings of the Annual Conference of the International
             Speech Communication Association, INTERSPEECH},
   Volume = {08-12-September-2016},
   Pages = {1603-1607},
   Publisher = {ISCA},
   Year = {2016},
   Month = {January},
   url = {http://dx.doi.org/10.21437/Interspeech.2016-997},
   Abstract = {Research on computational speech processing has
             traditionally relied on the availability of a relatively
             large and complex infrastructure, which encompasses data
             (text and audio), tools (feature extraction, model training,
             scoring, possibly on-line and off-line, etc.), glue code,
             and computing. Traditionally, it has been very hard to move
             experiments from one site to another, and to replicate
             experiments. With the increasing availability of shared
             platforms such as commercial cloud computing platforms or
             publicly funded super-computing centers, there is a need and
             an opportunity to abstract the experimental environment from
             the hardware, and distribute complete setups as a virtual
             machine, a container, or some other shareable resource, that
             can be deployed and worked with anywhere. In this paper, we
             discuss our experience with this concept and present some
             tools that the community might find useful. We outline, as a
             case study, how such tools can be applied to a naturalistic
             language acquisition audio corpus.},
   Doi = {10.21437/Interspeech.2016-997},
   Key = {fds318668}
}

@article{fds333677,
   Author = {Bergelson, E},
   Title = {Workshop on Corpus Collection, (Semi)-Automated Analysis,
             and Modeling of Large-Scale Naturalistic Language
             Acquisition Data},
   Journal = {Proceedings of the 38th Annual Meeting of the Cognitive
             Science Society, CogSci 2016},
   Pages = {21-22},
   Publisher = {cognitivesciencesociety.org},
   Editor = {Papafragou, A and Grodner, D and Mirman, D and Trueswell,
             JC},
   Year = {2016},
   Month = {January},
   ISBN = {9780991196739},
   Key = {fds333677}
}

@article{fds318669,
   Author = {Bergelson, E and Swingley, D},
   Title = {Early Word Comprehension in Infants: Replication and
             Extension.},
   Journal = {Language learning and development : the official journal of
             the Society for Language Development},
   Volume = {11},
   Number = {4},
   Pages = {369-380},
   Year = {2015},
   Month = {January},
   url = {http://dx.doi.org/10.1080/15475441.2014.979387},
   Abstract = {A handful of recent experimental reports have shown that
             infants of 6 to 9 months know the meanings of some common
             words. Here, we replicate and extend these findings. With a
             new set of items, we show that when young infants (age 6-16
             months, n=49) are presented with side-by-side video clips
             depicting various common early words, and one clip is named
             in a sentence, they look at the named video at above-chance
             rates. We demonstrate anew that infants understand common
             words by 6-9 months, and that performance increases
             substantially around 14 months. The results imply that 6- to
             9-month-olds' failure to understand words not referring to
             objects (verbs, adjectives, performatives) in a similar
             prior study is not attributable to the use of dynamic video
             depictions. Thus, 6- to 9-month-olds' experience of spoken
             language includes some understanding of common words for
             concrete objects, but relatively impoverished comprehension
             of other words.},
   Doi = {10.1080/15475441.2014.979387},
   Key = {fds318669}
}

@article{fds318670,
   Author = {Bergelson, E and Swingley, D},
   Title = {The acquisition of abstract words by young
             infants.},
   Journal = {Cognition},
   Volume = {127},
   Number = {3},
   Pages = {391-397},
   Year = {2013},
   Month = {June},
   url = {http://dx.doi.org/10.1016/j.cognition.2013.02.011},
   Abstract = {Young infants' learning of words for abstract concepts like
             'all gone' and 'eat,' in contrast to their learning of more
             concrete words like 'apple' and 'shoe,' may follow a
             relatively protracted developmental course. We examined
             whether infants know such abstract words. Parents named one
             of two events shown in side-by-side videos while their
             6-16-month-old infants (n=98) watched. On average, infants
             successfully looked at the named video by 10 months, but not
             earlier, and infants' looking at the named referent
             increased robustly at around 14 months. Six-month-olds
             already understand concrete words in this task (Bergelson &
             Swingley, 2012). A video-corpus analysis of unscripted
             mother-infant interaction showed that mothers used the
             tested abstract words less often in the presence of their
             referent events than they used concrete words in the
             presence of their referent objects. We suggest that
             referential uncertainty in abstract words' teaching
             conditions may explain the later acquisition of abstract
             than concrete words, and we discuss the possible role of
             changes in social-cognitive abilities over the 6-14 month
             period.},
   Doi = {10.1016/j.cognition.2013.02.011},
   Key = {fds318670}
}

@article{fds318673,
   Author = {Bergelson, E and Swingley, D},
   Title = {Social and Environmental Contributors to Infant Word
             Learning},
   Journal = {Cooperative Minds: Social Interaction and Group Dynamics -
             Proceedings of the 35th Annual Meeting of the Cognitive
             Science Society, CogSci 2013},
   Pages = {187-192},
   Publisher = {cognitivesciencesociety.org},
   Editor = {Knauff, M and Pauen, M and Sebanz, N and Wachsmuth,
             I},
   Year = {2013},
   Month = {January},
   ISBN = {9780976831891},
   Abstract = {Infants demonstrate comprehension of early nouns (e.g.
             “hand”) around six months, and comprehension of early
             non-nouns (e.g. “eat”) around 10 months. In two
             experiments, we explore the reasons for this lag. Expt. 1 is
             a gaze-following study, the results of which suggest an
             improvement in point-following around ten months, and reveal
             correlations between pointing and both overall and non-noun
             vocabulary. Expt. 2 is a set of corpus analyses, the results
             of which suggest that word frequency does not explain the
             difference between noun and non-noun age of acquisition,
             while suggesting that the co-presence of words and their
             referents may play an important role. The results of these
             experiments contribute to our understanding of word-learning
             across word classes, and lend support to environmental and
             social factors as having an impact on the trajectory of word
             learning in the first year of life.},
   Key = {fds318673}
}

@article{fds318671,
   Author = {Bergelson, E and Shvartsman, M and Idsardi, WJ},
   Title = {Differences in mismatch responses to vowels and musical
             intervals: MEG evidence.},
   Journal = {PloS one},
   Volume = {8},
   Number = {10},
   Pages = {e76758},
   Year = {2013},
   Month = {January},
   url = {http://dx.doi.org/10.1371/journal.pone.0076758},
   Abstract = {We investigated the electrophysiological response to matched
             two-formant vowels and two-note musical intervals, with the
             goal of examining whether music is processed differently
             from language in early cortical responses. Using
             magnetoencephalography (MEG), we compared the
             mismatch-response (MMN/MMF, an early, pre-attentive
             difference-detector occurring approximately 200 ms
             post-onset) to musical intervals and vowels composed of
             matched frequencies. Participants heard blocks of two
             stimuli in a passive oddball paradigm in one of three
             conditions: sine waves, piano tones and vowels. In each
             condition, participants heard two-formant vowels or musical
             intervals whose frequencies were 11, 12, or 24 semitones
             apart. In music, 12 semitones and 24 semitones are perceived
             as highly similar intervals (one and two octaves,
             respectively), while in speech 12 semitones and 11 semitones
             formant separations are perceived as highly similar (both
             variants of the vowel in 'cut'). Our results indicate that
             the MMN response mirrors the perceptual one: larger MMNs
             were elicited for the 12-11 pairing in the music conditions
             than in the language condition; conversely, larger MMNs were
             elicited to the 12-24 pairing in the language condition that
             in the music conditions, suggesting that within 250 ms of
             hearing complex auditory stimuli, the neural computation of
             similarity, just as the behavioral one, differs
             significantly depending on whether the context is music or
             speech.},
   Doi = {10.1371/journal.pone.0076758},
   Key = {fds318671}
}
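
%% A worked example of the interval arithmetic behind the design above: in
%% equal temperament an interval of s semitones is a frequency ratio of
%% 2**(s/12), so 12 and 24 semitones are exact octaves (2:1 and 4:1) while
%% 11 semitones is not a simple ratio. The 440 Hz reference is arbitrary.
base_hz = 440.0
for semitones in (11, 12, 24):
    ratio = 2 ** (semitones / 12)
    print(f"{semitones:>2} semitones: ratio {ratio:.3f}, "
          f"{base_hz:.0f} Hz -> {base_hz * ratio:.1f} Hz")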

@article{fds318672,
   Author = {Bergelson, E and Swingley, D},
   Title = {Young toddlers' word comprehension is flexible and
             efficient.},
   Journal = {PloS one},
   Volume = {8},
   Number = {8},
   Pages = {e73359},
   Year = {2013},
   Month = {January},
   url = {http://dx.doi.org/10.1371/journal.pone.0073359},
   Abstract = {Much of what is known about word recognition in toddlers
             comes from eyetracking studies. Here we show that the speed
             and facility with which children recognize words, as
             revealed in such studies, cannot be attributed to a
             task-specific, closed-set strategy; rather, children's gaze
             to referents of spoken nouns reflects successful search of
             the lexicon. Toddlers' spoken word comprehension was
             examined in the context of pictures that had two possible
             names (such as a cup of juice which could be called "cup" or
             "juice") and pictures that had only one likely name for
             toddlers (such as "apple"), using a visual world
             eye-tracking task and a picture-labeling task (n = 77,
              mean age 21 months). Toddlers were just as fast and
             accurate in fixating named pictures with two likely names as
             pictures with one. If toddlers do name pictures to
             themselves, the name provides no apparent benefit in word
             recognition, because there is no cost to understanding an
             alternative lexical construal of the picture. In toddlers,
             as in adults, spoken words rapidly evoke their
             referents.},
   Doi = {10.1371/journal.pone.0073359},
   Key = {fds318672}
}

@article{fds318674,
   Author = {Bergelson, E and Swingley, D},
   Title = {At 6-9 months, human infants know the meanings of many
             common nouns.},
   Journal = {Proceedings of the National Academy of Sciences of the
             United States of America},
   Volume = {109},
   Number = {9},
   Pages = {3253-3258},
   Year = {2012},
   Month = {February},
   url = {http://dx.doi.org/10.1073/pnas.1113380109},
   Abstract = {It is widely accepted that infants begin learning their
             native language not by learning words, but by discovering
             features of the speech signal: consonants, vowels, and
             combinations of these sounds. Learning to understand words,
             as opposed to just perceiving their sounds, is said to come
             later, between 9 and 15 mo of age, when infants develop a
             capacity for interpreting others' goals and intentions.
             Here, we demonstrate that this consensus about the
             developmental sequence of human language learning is flawed:
             in fact, infants already know the meanings of several common
             words from the age of 6 mo onward. We presented 6- to
             9-mo-old infants with sets of pictures to view while their
             parent named a picture in each set. Over this entire age
             range, infants directed their gaze to the named pictures,
             indicating their understanding of spoken words. Because the
             words were not trained in the laboratory, the results show
             that even young infants learn ordinary words through daily
             experience with language. This surprising accomplishment
             indicates that, contrary to prevailing beliefs, either
             infants can already grasp the referential intentions of
             adults at 6 mo or infants can learn words before this
             ability emerges. The precocious discovery of word meanings
             suggests a perspective in which learning vocabulary and
             learning the sound structure of spoken language go hand in
             hand as language acquisition begins.},
   Doi = {10.1073/pnas.1113380109},
   Key = {fds318674}
}

@article{fds318675,
   Author = {Bergelson, E and Idsardi, WJ},
   Title = {A neurophysiological study into the foundations of tonal
             harmony.},
   Journal = {Neuroreport},
   Volume = {20},
   Number = {3},
   Pages = {239-244},
   Year = {2009},
   Month = {February},
   url = {http://dx.doi.org/10.1097/wnr.0b013e32831ddebf},
   Abstract = {Our findings provide magnetoencephalographic evidence that
             the mismatch-negativity response to two-note chords (dyads)
             is modulated by a combination of abstract cognitive
             differences and lower-level differences in the auditory
             signal. Participants were presented with series of
             simple-ratio sinusoidal dyads (perfect fourths and perfect
             fifths) in which the difference between the standard and
             deviant dyad exhibited an interval change, a shift in pitch
             space, or both. In addition, the standard-deviant pair of
             dyads either shared one note or both notes were changed.
             Only the condition that featured both abstract changes
             (interval change and pitch-space shift) and two novel notes
             showed a significantly larger magnetoencephalographic
             mismatch-negativity response than the other conditions in
             the right hemisphere. Implications for music and language
             processing are discussed.},
   Doi = {10.1097/wnr.0b013e32831ddebf},
   Key = {fds318675}
}

@article{fds337244,
   Author = {Bergelson, E and Idsardi, WJ},
   Title = {Structural Biases in Phonology: Infant and Adult Evidence
             from Artificial Language Learning},
   Journal = {PROCEEDINGS OF THE 33RD ANNUAL BOSTON UNIVERSITY CONFERENCE
             ON LANGUAGE DEVELOPMENT, VOLS 1 AND 2},
   Pages = {85-+},
   Publisher = {CASCADILLA PRESS},
   Editor = {Chandlee, J and Franchini, M and Lord, S and Rheiner,
             GM},
   Year = {2009},
   Month = {January},
   ISBN = {978-1-57473-094-4},
   Key = {fds337244}
}

@article{fds330847,
   Author = {Poeppel, D and Bergelson, E},
   Title = {How music speaks to us},
   Journal = {Nature},
   Volume = {452},
   Number = {7188},
   Pages = {695-696},
   Publisher = {Springer Science and Business Media LLC},
   Year = {2008},
   Month = {April},
   url = {http://dx.doi.org/10.1038/452695a},
   Doi = {10.1038/452695a},
   Key = {fds330847}
}

