Linguistics Faculty Database
Linguistics
Arts & Sciences
Duke University

 HOME > Arts & Sciences > Linguistics > Faculty    Search Help Login pdf version printable version 

Publications of Reiko Mazuka    :chronological  alphabetical  combined listing:

%% Journal Articles   
@article{fds335695,
   author   = {Guevara-Rukoz, A and Cristia, A and Ludusan, B and
               Thiolli{\`e}re, R and Martin, A and Mazuka, R and Dupoux, E},
   title    = {Are Words Easier to Learn From Infant- Than Adult-Directed
               Speech? A Quantitative Corpus-Based Investigation},
   journal  = {Cognitive Science},
   year     = {2018},
   month    = may,
   url      = {http://dx.doi.org/10.1111/cogs.12616},
   abstract = {We investigate whether infant-directed speech (IDS) could
               facilitate word form learning when compared to
               adult-directed speech (ADS). To study this, we examine the
               distribution of word forms at two levels, acoustic and
               phonological, using a large database of spontaneous speech
               in Japanese. At the acoustic level we show that, as has been
               documented before for phonemes, the realizations of words
               are more variable and less discriminable in IDS than in ADS.
               At the phonological level, we find an effect in the opposite
               direction: The IDS lexicon contains more distinctive words
               (such as onomatopoeias) than the ADS counterpart. Combining
               the acoustic and phonological metrics together in a global
               discriminability score reveals that the bigger separation of
               lexical categories in the phonological space does not
               compensate for the opposite effect observed at the acoustic
               level. As a result, IDS word forms are still globally less
               discriminable than ADS word forms, even though the effect is
               numerically small. We discuss the implication of these
               findings for the view that the functional role of IDS is to
               improve language learnability.},
   doi      = {10.1111/cogs.12616},
   key      = {fds335695}
}

@article{fds335696,
   author   = {Shin, M and Choi, Y and Mazuka, R},
   title    = {Development of fricative sound perception in {Korean}
               infants: The role of language experience and infants'
               initial sensitivity},
   journal  = {{PLoS ONE}},
   volume   = {13},
   number   = {6},
   pages    = {e0199045},
   year     = {2018},
   month    = jan,
   url      = {http://dx.doi.org/10.1371/journal.pone.0199045},
   abstract = {In this paper, we report data on the development of Korean
               infants' perception of a rare fricative phoneme distinction.
               Korean fricative consonants have received much interest in
               the linguistic community due to the language's distinct
               categorization of sounds. Unlike many fricative contrasts
               utilized in most of the world's languages, Korean fricatives
               (/s*/-/s/) are all voiceless. Moreover, compared with other
               sound categories, fricatives have received very little
               attention in the speech perception development field and no
               studies thus far have examined Korean infants' development
               of native phonology in this domain. Using a visual
               habituation paradigm, we tested 4--6-month-old and
               7--9-month-old Korean infants on their abilities to
               discriminate the Korean fricative pair in the [a] vowel
               context, /s*a/-/sa/, which can be distinguished based on
               acoustic cues, such as the durations of aspiration and
               frication noise. Korean infants older than 7 months were
               able to reliably discriminate the fricative pair but younger
               infants did not show clear signs of such discrimination.
               These results add to the growing evidence that there are
               native sound contrasts infants cannot discriminate early on
               without a certain amount of language exposure, providing
               further data to help delineate the specific nature of early
               perceptual capacity.},
   doi      = {10.1371/journal.pone.0199045},
   key      = {fds335696}
}

@article{fds329036,
   author   = {Akimoto, Y and Takahashi, H and Gunji, A and Kaneko, Y and
               Asano, M and Matsuo, J and Ota, M and Kunugi, H and
               Hanakawa, T and Mazuka, R and Kamio, Y},
   title    = {Alpha band event-related desynchronization underlying social
               situational context processing during irony comprehension: A
               magnetoencephalography source localization study},
   journal  = {Brain and Language},
   volume   = {175},
   pages    = {42--46},
   year     = {2017},
   month    = dec,
   url      = {http://dx.doi.org/10.1016/j.bandl.2017.09.002},
   abstract = {Irony comprehension requires integration of social
               contextual information. Previous studies have investigated
               temporal aspects of irony processing and its neural
               substrates using psychological/electroencephalogram or
               functional magnetic resonance imaging methods, but have not
               clarified the temporospatial neural mechanisms of irony
               comprehension. Therefore, we used magnetoencephalography to
               investigate the neural generators of alpha-band (8-13Hz)
               event-related desynchronization (ERD) occurring from 600 to
               900ms following the onset of a critical sentence at which
               social situational contexts activated ironic representation.
               We found that the right anterior temporal lobe, which is
               involved in processing social knowledge and evaluating
               others' intentions, exhibited stronger alpha ERD following
               an ironic statement than following a literal statement. We
               also found that alpha power in the left anterior temporal
               lobe correlated with the participants' communication
               abilities. These results elucidate the temporospatial neural
               mechanisms of language comprehension in social contexts,
               including non-literal processing.},
   doi      = {10.1016/j.bandl.2017.09.002},
   key      = {fds329036}
}

@article{fds325710,
   author   = {Hirose, Y and Mazuka, R},
   title    = {Exploiting Pitch Accent Information in Compound Processing:
               A Comparison between Adults and 6- to 7-Year-Old Children},
   journal  = {Language Learning and Development},
   volume   = {13},
   number   = {4},
   pages    = {375--394},
   publisher = {Informa UK Limited},
   year     = {2017},
   month    = oct,
   url      = {http://dx.doi.org/10.1080/15475441.2017.1292141},
   abstract = {A noun can be potentially ambiguous as to whether it is a
               head on its own, or is a modifier of a Noun + Noun compound
               waiting for its head. This study investigates whether young
               children can exploit the prosodic information on a modifier
               constituent preceding the head to facilitate resolution of
               such ambiguity in Japanese. Evidence from English suggests
               that young speakers are not sensitive to compound stress in
               distinguishing between compounds and syntactic phrases
               unless the compound is very familiar (Good, 2008; Vogel \&
               Raimy, 2002). This study concerns whether children in
               general have such limited capability to use prosodic cues to
               promptly compute a compound representation without the
               lexical boost, or whether they might show greater
               sensitivity to more categorical compound prosody such as
               that associated with the Compound Accent Rule (CAR) in
               Japanese. A previous study (Hirose \& Mazuka, 2015)
               demonstrated that adult Japanese speakers can predict the
               compound structure prior to the head if the prosodic
               information on the modifier unambiguously signals that the
               CAR is being applied. The present study conducted the same
               on-line experiment with children (6- to 7-year-olds) and
               compared the time course of the effects with that of adults
               using permutation-based analysis (Maris \& Oosternveld,
               2007). The results reveal that children are sensitive to
               pitch accent information that facilitates the quicker
               processing of the compound or the single head noun
               representation compared to when such prosodic signals are
               less apparent, depending on the type of the lexical accent
               of the noun in question.},
   doi      = {10.1080/15475441.2017.1292141},
   key      = {fds325710}
}

@article{fds326609,
   author   = {Miyazawa, K and Shinya, T and Martin, A and Kikuchi, H and
               Mazuka, R},
   title    = {Vowels in infant-directed speech: More breathy and more
               variable, but not clearer},
   journal  = {Cognition},
   volume   = {166},
   pages    = {84--93},
   year     = {2017},
   month    = sep,
   url      = {http://dx.doi.org/10.1016/j.cognition.2017.05.003},
   abstract = {Infant-directed speech (IDS) is known to differ from
               adult-directed speech (ADS) in a number of ways, and it has
               often been argued that some of these IDS properties
               facilitate infants' acquisition of language. An influential
               study in support of this view is Kuhl et al. (1997), which
               found that vowels in IDS are produced with expanded first
               and second formants (F1/F2) on average, indicating that the
               vowels are acoustically further apart in IDS than in ADS.
               These results have been interpreted to mean that the way
               vowels are produced in IDS makes infants' task of learning
               vowel categories easier. The present paper revisits this
               interpretation by means of a thorough analysis of IDS vowels
               using a large-scale corpus of Japanese natural utterances.
               We will show that the expansion of F1/F2 values does occur
               in spontaneous IDS even when the vowels' prosodic position,
               lexical pitch accent, and lexical bias are accounted for.
               When IDS vowels are compared to carefully read speech (CS)
               by the same mothers, however, larger variability among IDS
               vowel tokens means that the acoustic distances among vowels
               are farther apart only in CS, but not in IDS when compared
               to ADS. Finally, we will show that IDS vowels are
               significantly more breathy than ADS or CS vowels. Taken
               together, our results demonstrate that even though expansion
               of formant values occurs in spontaneous IDS, this expansion
               cannot be interpreted as an indication that the acoustic
               distances among vowels are farther apart, as is the case in
               CS. Instead, we found that IDS vowels are characterized by
               breathy voice, which has been associated with the
               communication of emotional affect.},
   doi      = {10.1016/j.cognition.2017.05.003},
   key      = {fds326609}
}

@inproceedings{fds332175,
   author    = {Mazuka, R and Bernard, M and Cristia, A and Dupoux, E and
                Ludusan, B},
   title     = {The role of prosody and speech register in word
                segmentation: A computational modelling perspective},
   booktitle = {{ACL} 2017 - 55th Annual Meeting of the Association for
                Computational Linguistics, Proceedings of the Conference
                (Long Papers)},
   volume    = {2},
   pages     = {178--183},
   publisher = {Association for Computational Linguistics},
   year      = {2017},
   month     = jan,
   isbn      = {9781945626760},
   url       = {http://dx.doi.org/10.18653/v1/P17-2028},
   abstract  = {This study explores the role of speech register and prosody
                for the task of word segmentation. Since these two factors
                are thought to play an important role in early language
                acquisition, we aim to quantify their contribution for this
                task. We study a Japanese corpus containing both infant-
                and adult-directed speech and we apply four different word
                segmentation models, with and without knowledge of prosodic
                boundaries. The results showed that the difference between
                registers is smaller than previously reported and that
                prosodic boundary information helps more adult- than
                infant-directed speech.},
   doi       = {10.18653/v1/P17-2028},
   key       = {fds332175}
}

@article{fds332177,
   author   = {Ota, M and Yamane, N and Mazuka, R},
   title    = {The Effects of Lexical Pitch Accent on Infant Word
               Recognition in {Japanese}},
   journal  = {Frontiers in Psychology},
   volume   = {8},
   pages    = {2354},
   year     = {2017},
   month    = jan,
   url      = {http://dx.doi.org/10.3389/fpsyg.2017.02354},
   abstract = {Learners of lexical tone languages (e.g., Mandarin) develop
               sensitivity to tonal contrasts and recognize pitch-matched,
               but not pitch-mismatched, familiar words by 11 months.
               Learners of non-tone languages (e.g., English) also show a
               tendency to treat pitch patterns as lexically contrastive up
               to about 18 months. In this study, we examined if this
               early-developing capacity to lexically encode pitch
               variations enables infants to acquire a pitch accent system,
               in which pitch-based lexical contrasts are obscured by the
               interaction of lexical and non-lexical (i.e., intonational)
               features. Eighteen 17-month-olds learning Tokyo Japanese
               were tested on their recognition of familiar words with the
               expected pitch or the lexically opposite pitch pattern. In
               early trials, infants were faster in shifting their eyegaze
               from the distractor object to the target object than in
               shifting from the target to distractor in the pitch-matched
               condition. In later trials, however, infants showed faster
               distractor-to-target than target-to-distractor shifts in
               both the pitch-matched and pitch-mismatched conditions. We
               interpret these results to mean that, in a pitch-accent
               system, the ability to use pitch variations to recognize
               words is still in a nascent state at 17 months.},
   doi      = {10.3389/fpsyg.2017.02354},
   key      = {fds332177}
}

@article{fds321661,
   author   = {Hayashi, A and Mazuka, R},
   title    = {Emergence of {Japanese} infants' prosodic preferences in
               infant-directed vocabulary},
   journal  = {Developmental Psychology},
   volume   = {53},
   number   = {1},
   pages    = {28--37},
   year     = {2017},
   month    = jan,
   url      = {http://dx.doi.org/10.1037/dev0000259},
   abstract = {The article examines the role of infant-directed vocabulary
               (IDV) in infants' language acquisition, specifically
               addressing the question of whether IDV forms that are not
               prominent in adult language may nonetheless be useful to the
               process of acquisition. Japanese IDV offers a good test
               case, as IDV characteristically takes a bisyllabic
               H(eavy)-L(ight) form that is rare in adult speech. In 5
               experiments using the Headturn Preference Procedure (HPP),
               8- to 10-month-old Japanese infants, but not 4- to
               6-month-olds, were found to show a preference for bisyllabic
               H-L words over other types of words. These results
               demonstrate (a) that infants may develop a preference for a
               dominant prosodic form based on infant-directed speech, even
               when it is not a prominent characteristic of adult language;
               and, perhaps more importantly, (b) that infant-directed
               speech may provide a boost for a feature that could be
               useful for infants' acquisition of language even when it not
               prominent in adult language.},
   doi      = {10.1037/dev0000259},
   key      = {fds321661}
}

@article{fds332176,
   author   = {Sugiura, L and Toyota, T and Matsuba-Kurita, H and Iwayama, Y
               and Mazuka, R and Yoshikawa, T and Hagiwara, H},
   title    = {Age-Dependent Effects of {Catechol-O-Methyltransferase}
               ({COMT}) Gene {Val158Met} Polymorphism on Language Function
               in Developing Children},
   journal  = {Cerebral Cortex},
   volume   = {27},
   number   = {1},
   pages    = {104--116},
   year     = {2017},
   month    = jan,
   url      = {http://dx.doi.org/10.1093/cercor/bhw371},
   abstract = {The genetic basis controlling language development remains
               elusive. Previous studies of the catechol-O-methyltransferase
               (COMT) Val158Met genotype and cognition have focused on
               prefrontally guided executive functions involving dopamine.
               However, COMT may further influence posterior cortical
               regions implicated in language perception. We investigated
               whether COMT influences language ability and cortical
               language processing involving the posterior language regions
               in 246 children aged 6-10 years. We assessed language
               ability using a language test and cortical responses
               recorded during language processing using a word repetition
               task and functional near-infrared spectroscopy. The COMT
               genotype had significant effects on language performance and
               processing. Importantly, Met carriers outperformed Val
               homozygotes in language ability during the early elementary
               school years (6-8 years), whereas Val homozygotes exhibited
               significant language development during the later elementary
               school years. Both genotype groups exhibited equal language
               performance at approximately 10 years of age. Val
               homozygotes exhibited significantly less cortical activation
               compared with Met carriers during word processing,
               particularly at older ages. These findings regarding
               dopamine transmission efficacy may be explained by a
               hypothetical inverted U-shaped curve. Our findings indicate
               that the effects of the COMT genotype on language ability
               and cortical language processing may change in a narrow age
               window of 6-10 years.},
   doi      = {10.1093/cercor/bhw371},
   key      = {fds332176}
}

@article{fds318734,
   author   = {Martin, A and Igarashi, Y and Jincho, N and Mazuka, R},
   title    = {Utterances in infant-directed speech are shorter, not
               slower},
   journal  = {Cognition},
   volume   = {156},
   pages    = {52--59},
   year     = {2016},
   month    = nov,
   url      = {http://dx.doi.org/10.1016/j.cognition.2016.07.015},
   abstract = {It has become a truism in the literature on infant-directed
               speech (IDS) that IDS is pronounced more slowly than
               adult-directed speech (ADS). Using recordings of 22 Japanese
               mothers speaking to their infant and to an adult, we show
               that although IDS has an overall lower mean speech rate than
               ADS, this is not the result of an across-the-board slowing
               in which every vowel is expanded equally. Instead, the
               speech rate difference is entirely due to the effects of
               phrase-final lengthening, which disproportionally affects
               IDS because of its shorter utterances. These results
               demonstrate that taking utterance-internal prosodic
               characteristics into account is crucial to studies of speech
               rate.},
   doi      = {10.1016/j.cognition.2016.07.015},
   key      = {fds318734}
}

@article{fds318735,
   author   = {Ludusan, B and Cristia, A and Martin, A and Mazuka, R and
               Dupoux, E},
   title    = {Learnability of prosodic boundaries: Is infant-directed
               speech easier?},
   journal  = {The Journal of the Acoustical Society of America},
   volume   = {140},
   number   = {2},
   pages    = {1239},
   year     = {2016},
   month    = aug,
   url      = {http://dx.doi.org/10.1121/1.4960576},
   abstract = {This study explores the long-standing hypothesis that the
               acoustic cues to prosodic boundaries in infant-directed
               speech (IDS) make those boundaries easier to learn than
               those in adult-directed speech (ADS). Three cues (pause
               duration, nucleus duration, and pitch change) were
               investigated, by means of a systematic review of the
               literature, statistical analyses of a corpus of Japanese,
               and machine learning experiments. The review of previous
               work revealed that the effect of register on boundary cues
               is less well established than previously thought, and that
               results often vary across studies for certain cues.
               Statistical analyses run on a large database of mother-child
               and mother-interviewer interactions showed that the duration
               of a pause and the duration of the syllable nucleus
               preceding the boundary are two cues which are enhanced in
               IDS, while f0 change is actually degraded in IDS. Supervised
               and unsupervised machine learning techniques applied to
               these acoustic cues revealed that IDS boundaries were
               consistently better classified than ADS ones, regardless of
               the learning method used. The role of the cues examined in
               this study and the importance of these findings in the more
               general context of early linguistic structure acquisition is
               discussed.},
   doi      = {10.1121/1.4960576},
   key      = {fds318735}
}

@article{fds318736,
   author   = {Tsuji, S and Fikkert, P and Yamane, N and Mazuka, R},
   title    = {Language-general biases and language-specific experience
               contribute to phonological detail in toddlers' word
               representations},
   journal  = {Developmental Psychology},
   volume   = {52},
   number   = {3},
   pages    = {379--390},
   year     = {2016},
   month    = mar,
   url      = {http://dx.doi.org/10.1037/dev0000093},
   abstract = {Although toddlers in their 2nd year of life generally have
               phonologically detailed representations of words, a
               consistent lack of sensitivity to certain kinds of
               phonological changes has been reported. The origin of these
               insensitivities is poorly understood, and uncovering their
               cause is crucial for obtaining a complete picture of early
               phonological development. The present study explored the
               origins of the insensitivity to the change from coronal to
               labial consonants. In cross-linguistic research, we assessed
               to what extent this insensitivity is language-specific (or
               would show both in learners of Dutch and a very different
               language like Japanese), and contrast/direction-specific to
               the coronal-to-labial change (or would also extend to the
               coronal-to-dorsal change). We measured Dutch and Japanese
               18-month-old toddlers' sensitivity to labial and dorsal
               mispronunciations of newly learned coronal-initial words.
               Both Dutch and Japanese toddlers showed reduced sensitivity
               to the coronal-to-labial change, although this effect was
               more pronounced in Dutch toddlers. The lack of sensitivity
               was also specific to the coronal-to-labial change because
               toddlers from both language backgrounds were highly
               sensitive to dorsal mispronunciations. Combined with results
               from previous studies, the present outcomes are most
               consistent with an early, language-general bias specific to
               the coronal-to-labial change, which is modified by the
               properties of toddlers' early, language-specific
               lexicon.},
   doi      = {10.1037/dev0000093},
   key      = {fds318736}
}

@article{fds325973,
   author   = {Jincho, N and Oishi, H and Mazuka, R},
   title    = {Referential ambiguity resolution in sentence comprehension:
               A developmental study measuring eye movements and pupil
               dilation},
   journal  = {The Japanese Journal of Educational Psychology},
   volume   = {64},
   number   = {4},
   pages    = {531--543},
   publisher = {The Japanese Association of Educational Psychology},
   year     = {2016},
   month    = jan,
   url      = {http://dx.doi.org/10.5926/jjep.64.531},
   abstract = {The present study investigated whether adults and 5-and
               6-year-old children could incrementally resolve referential
               ambiguity of adjective-noun phrases in Japanese. Using a
               visual world paradigm, the experiment examined whether the
               proportion of participants' gaze on the referent and their
               pupil dilations were affected by the timing of
               disambiguation (pre-nominal adjective or noun). The results
               indicated that the proportion of the adults' gazes showed a
               reliable effect of the timing of disambiguation, but this
               was not found in the results from the children. The
               6-year-olds' pupil dilation data showed larger pupil
               dilations in the adjective disambiguation condition than in
               the noun disambiguation condition. This suggests that the
               6-year-olds also incrementally resolved the referential
               ambiguity. Furthermore, the adults showed a disambiguation
               effect, with larger dilations for the noun disambiguations
               than for the adjective disambiguations. No significant
               differences were observed in the data from the 5-year-olds.
               These results suggest that the 6-year-olds and the adults
               were able to resolve referential ambiguities incrementally,
               but that the 6-year-olds' eye movement control was not as
               fully developed as the adults'. In addition, the results
               suggested that pupil dilations could be a complementary
               measure of on-line sentence processing. That would be
               especially advantageous when experimental participants are
               young children.},
   doi      = {10.5926/jjep.64.531},
   key      = {fds325973}
}

@inproceedings{fds318737,
   author    = {Nakamura, R and Miyazawa, K and Ishihara, H and Nishikawa, K
                and Kikuchi, H and Asada, M and Mazuka, R},
   title     = {Constructing the corpus of infant-directed speech and
                infant-like robot-directed speech},
   booktitle = {{HAI} 2015 - Proceedings of the 3rd International Conference
                on Human Agent Interaction},
   pages     = {167--169},
   year      = {2015},
   month     = oct,
   isbn      = {9781450335270},
   url       = {http://dx.doi.org/10.1145/2814940.2814965},
   abstract  = {The characteristics of the spoken language used to address
                infants have been eagerly studied as a part of the language
                acquisition research. Because of the uncontrollability
                factor with regard to the infants, the features and roles
                of infant-directed speech were tried to be revealed by the
                comparison of speech directed toward infants and that
                toward other listeners. However, they share few
                characteristics with infants, while infants have many
                characteristics which may derive the features of IDS. In
                this study, to solve this problem, we will introduce a new
                approach that replaces the infant with an infant-like robot
                which is designed to control its motions and to imitate its
                appearance very similar to a real infant. We have now
                recorded both infant- and infant-like robot-directed speech
                and are constructing both corpora. Analysis of these corpora
                is expected to contribute to the studies of infant-directed
                speech. In this paper, we discuss the contents of this
                approach and the outline of the corpora.},
   doi       = {10.1145/2814940.2814965},
   key       = {fds318737}
}

@article{fds318738,
   author   = {Mazuka, R and Igarashi, Y and Martin, A and Utsugi, A},
   title    = {Infant-directed speech as a window into the dynamic nature
               of phonology},
   journal  = {Laboratory Phonology},
   volume   = {6},
   number   = {3-4},
   pages    = {281--303},
   publisher = {Walter de Gruyter GmbH},
   year     = {2015},
   month    = oct,
   url      = {http://dx.doi.org/10.1515/lp-2015-0009},
   abstract = {Theoretical frameworks of phonology are built largely on the
               basis of idealized speech, typically recorded in a
               laboratory under static conditions. Natural speech, in
               contrast, occurs in a variety of communicative contexts
               where speakers and hearers dynamically adjust their speech
               to fit their needs. The present paper demonstrates that
               phonologically informed analysis of specialized speech
               registers, such as infant-directed speech, can reveal
               specific ways segmental and supra-segmental aspects of
               phonology are modulated dynamically to accommodate the
               specific communicative needs of speakers and hearers. Data
               for the analyses come from a corpus of Japanese mothers'
               spontaneous speech directed to their infant child
               (infant-directed speech, IDS) and an adult (adult-directed
               speech, ADS), as well as read speech (RS). The speech
               samples in the corpus are annotated with segmental,
               morphological, and intonational information. We will show
               that the way intonation is exaggerated in Japanese IDS
               reflects the intonational structure of Japanese, which is
               different from that of English. We will also demonstrate
               that rules of phonological grammar, such as devoicing of
               high vowels and non-high vowels in Japanese, can be
               differently affected by the needs of the speaker to
               accommodate the specific characteristics of the
               listener.},
   doi      = {10.1515/lp-2015-0009},
   key      = {fds318738}
}

@article{fds252794,
   author   = {Hawthorne, K and Mazuka, R and Gerken, L},
   title    = {The acoustic salience of prosody trumps infants' acquired
               knowledge of language-specific prosodic patterns},
   journal  = {Journal of Memory and Language},
   volume   = {82},
   pages    = {105--117},
   year     = {2015},
   month    = jul,
   issn     = {0749-596X},
   url      = {http://dx.doi.org/10.1016/j.jml.2015.03.005},
   abstract = {There is mounting evidence that prosody facilitates grouping
               the speech stream into syntactically-relevant units (e.g.,
               Hawthorne \& Gerken, 2014; Soderstrom, Kemler Nelson, \&
               Jusczyk, 2005). We ask whether prosody's role in syntax
               acquisition relates to its general acoustic salience or to
               the learner's acquired knowledge of correlations between
               prosody and syntax in her native language. English- and
               Japanese-acquiring 19-month-olds listened to sentences from
               an artificial grammar with non-native prosody (Japanese or
               English, respectively), then were tested on their ability to
               recognize prosodically-marked constituents when the
               constituents had moved to a new position in the sentence.
               Both groups were able to use non-native prosody to parse
               speech into cohesive, reorderable, syntactic
               constituent-like units. Comparison with Hawthorne \& Gerken
               (2014), in which English-acquiring infants were tested on
               sentences with English prosody, suggests that 19-month-olds
               are equally adept at using native and non-native prosody for
               at least some types of learning tasks and, therefore, that
               prosody is useful in early syntactic segmentation because of
               its acoustic salience.},
   doi      = {10.1016/j.jml.2015.03.005},
   key      = {fds252794}
}

@article{fds252795,
   Author = {Martin, A and Schatz, T and Versteegh, M and Miyazawa, K and Mazuka, R and Dupoux, E and Cristia, A},
   Title = {Mothers speak less clearly to infants than to adults: a
             comprehensive test of the hyperarticulation
             hypothesis.},
   Journal = {Psychological Science},
   Volume = {26},
   Number = {3},
   Pages = {341--347},
   Year = {2015},
   Month = {March},
   ISSN = {0956-7976},
   url = {http://dx.doi.org/10.1177/0956797614562453},
   Abstract = {Infants learn language at an incredible speed, and one of
             the first steps in this voyage is learning the basic sound
             units of their native languages. It is widely thought that
             caregivers facilitate this task by hyperarticulating when
             speaking to their infants. Using state-of-the-art speech
             technology, we addressed this key theoretical question: Are
             sound categories clearer in infant-directed speech than in
             adult-directed speech? A comprehensive examination of sound
             contrasts in a large corpus of recorded, spontaneous
             Japanese speech demonstrates that there is a small but
             significant tendency for contrasts in infant-directed speech
             to be less clear than those in adult-directed speech. This
             finding runs contrary to the idea that caregivers actively
             enhance phonetic categories in infant-directed speech. These
             results suggest that to be plausible, theories of infants'
             language acquisition must posit an ability to learn from
             noisy data.},
   Doi = {10.1177/0956797614562453},
   Key = {fds252795}
}

@article{fds252797,
   Author = {Hirose, Y and Mazuka, R},
   Title = {Predictive processing of novel compounds: evidence from
             {Japanese}.},
   Journal = {Cognition},
   Volume = {136},
   Pages = {350--358},
   Year = {2015},
   Month = {March},
   ISSN = {0010-0277},
   url = {http://dx.doi.org/10.1016/j.cognition.2014.11.033},
   Abstract = {Our study argues that pre-head anticipatory processing
             operates at a level below the level of the sentence. A
             visual-world eye-tracking study demonstrated that, in
             processing of Japanese novel compounds, the compound
             structure can be constructed prior to the head if the
             prosodic information on the preceding modifier constituent
             signals that the Compound Accent Rule (CAR) is being
             applied. This prosodic cue rules out the single head
             analysis of the modifier noun, which would otherwise be a
             natural and economical choice. Once the structural
             representation for the head is computed in advance, the
             parser becomes faster in identifying the compound meaning.
             This poses a challenge to models maintaining that structural
             integration and word recognition are separate processes. At
             the same time, our results, together with previous findings,
             suggest the possibility that there is some degree of staging
             during the processing of different sources of information
             during the comprehension of compound nouns.},
   Doi = {10.1016/j.cognition.2014.11.033},
   Key = {fds252797}
}

@article{fds252798,
   Author = {Arai, M and Nakamura, C and Mazuka, R},
   Title = {Predicting the unbeaten path through syntactic
             priming.},
   Journal = {Journal of Experimental Psychology. Learning, Memory, and
             Cognition},
   Volume = {41},
   Number = {2},
   Pages = {482--500},
   Year = {2015},
   Month = {March},
   ISSN = {0278-7393},
   url = {http://dx.doi.org/10.1037/a0038389},
   Abstract = {A number of previous studies showed that comprehenders make
             use of lexically based constraints such as subcategorization
             frequency in processing structurally ambiguous sentences.
             One piece of such evidence is lexically specific syntactic
             priming in comprehension; following the costly processing of
             a temporarily ambiguous sentence, comprehenders experience
             less processing difficulty with the same structure with the
             same verb in subsequent processing. In previous studies
             using a reading paradigm, however, the effect was observed
             at or following disambiguating information and it is not
             known whether a priming effect affects only the process of
             resolving structural ambiguity following disambiguating
             input or it also affects the process before ambiguity is
             resolved. Using a visual world paradigm, the current study
             addressed this issue with Japanese relative clause
             sentences. Our results demonstrated that after experiencing
             the relative clause structure, comprehenders were more
             likely to predict the usually dispreferred structure
             immediately upon hearing the same verb. No compatible
             effect, in contrast, was observed on hearing a different
             verb. Our results are consistent with the constraint-based
             lexicalist view, which assumes the parallel activation of
             possible structural analyses at the verb. Our study
             demonstrated that an experience of a dispreferred structure
             activates the structural information in a lexically specific
             manner, leading comprehenders to predict another instance of
             the same structure on encountering the same
             verb.},
   Doi = {10.1037/a0038389},
   Key = {fds252798}
}

@article{fds252799,
   Author = {Tsuji, S and Mazuka, R and Cristia, A and Fikkert,
             P},
   Title = {Even at 4 months, a labial is a good enough coronal, but not
             vice versa.},
   Journal = {Cognition},
   Volume = {134},
   Pages = {252--256},
   Year = {2015},
   Month = {January},
   ISSN = {0010-0277},
   url = {http://dx.doi.org/10.1016/j.cognition.2014.10.009},
   Abstract = {Numerous studies have revealed an asymmetry tied to the
             perception of coronal place of articulation: participants
             accept a labial mispronunciation of a coronal target, but
             not vice versa. Whether or not this asymmetry is based on
             language-general properties or arises from language-specific
             experience has been a matter of debate. The current study
             suggests a bias of the first type by documenting an early,
             cross-linguistic asymmetry related to coronal place of
             articulation. Japanese and Dutch 4- and 6-month-old infants
             showed evidence of discrimination if they were habituated to
             a labial and then tested on a coronal sequence, but not vice
             versa. This finding has important implications for both
             phonological theories and infant speech perception
             research.},
   Doi = {10.1016/j.cognition.2014.10.009},
   Key = {fds252799}
}

@article{fds252811,
   Author = {Tsuji, S and Nishikawa, K and Mazuka, R},
   Title = {Segmental distributions and consonant-vowel association
             patterns in {Japanese} infant- and adult-directed
             speech.},
   Journal = {Journal of Child Language},
   Volume = {41},
   Number = {6},
   Pages = {1276--1304},
   Year = {2014},
   Month = {November},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/24229534},
   Abstract = {Japanese infant-directed speech (IDS) and adult-directed
             speech (ADS) were compared on their segmental distributions
             and consonant-vowel association patterns. Consistent with
             findings in other languages, a higher ratio of segments that
             are generally produced early was found in IDS compared to
             ADS: more labial consonants and low-central vowels, but
             fewer fricatives. Consonant-vowel associations also favored
             the early produced labial-central, coronal-front,
             coronal-central, and dorsal-back patterns. On the other
             hand, clear language-specific patterns included a higher
             frequency of dorsals, affricates, geminates, and moraic
             nasals in IDS. These segments are frequent in adult
             Japanese, but not in the early productions or the IDS of
             other studied languages. In combination with previous
             results, the current study suggests that both fine-tuning
             (an increased use of early produced segments) and
             highlighting (an increased use of language-specifically
             relevant segments) might modify IDS on the segmental
             level.},
   Doi = {10.1017/S0305000913000469},
   Key = {fds252811}
}

@article{fds252803,
   Author = {Gonzalez-Gomez, N and Hayashi, A and Tsuji, S and Mazuka, R and Nazzi,
             T},
   Title = {The role of the input on the development of the {LC} bias: a
             crosslinguistic comparison.},
   Journal = {Cognition},
   Volume = {132},
   Number = {3},
   Pages = {301--311},
   Year = {2014},
   Month = {September},
   ISSN = {0010-0277},
   url = {http://dx.doi.org/10.1016/j.cognition.2014.04.004},
   Abstract = {Previous studies have described the existence of a
             phonotactic bias called the Labial-Coronal (LC) bias,
             corresponding to a tendency to produce more words beginning
             with a labial consonant followed by a coronal consonant
             (i.e. "bat") than the opposite CL pattern (i.e. "tap"). This
             bias has initially been interpreted in terms of articulatory
             constraints of the human speech production system. However,
             more recently, it has been suggested that this presumably
             language-general LC bias in production might be accompanied
             by LC and CL biases in perception, acquired in infancy on
             the basis of the properties of the linguistic input. The
             present study investigates the origins of these perceptual
             biases, testing infants learning Japanese, a language that
             has been claimed to possess more CL than LC sequences, and
             comparing them with infants learning French, a language
             showing a clear LC bias in its lexicon. First, a corpus
             analysis of Japanese IDS and ADS revealed the existence of
             an overall LC bias, except for plosive sequences in ADS,
             which show a CL bias across counts. Second, speech
             preference experiments showed a perceptual preference for CL
             over LC plosive sequences (all recorded by a Japanese
             speaker) in 13- but not in 7- and 10-month-old
             Japanese-learning infants (Experiment 1), while revealing
             the emergence of an LC preference between 7 and 10 months in
             French-learning infants, using the exact same stimuli. These
             crosslinguistic behavioral differences, obtained with the
             same stimuli, thus reflect differences in processing in two
             populations of infants, which can be linked to differences
             in the properties of the lexicons of their respective native
             languages. These findings establish that the emergence of a
             CL/LC bias is related to exposure to a linguistic
             input.},
   Doi = {10.1016/j.cognition.2014.04.004},
   Key = {fds252803}
}

@article{fds252804,
   Author = {Martin, A and Utsugi, A and Mazuka, R},
   Title = {The multidimensional nature of hyperspeech: evidence from
             {Japanese} vowel devoicing.},
   Journal = {Cognition},
   Volume = {132},
   Number = {2},
   Pages = {216--228},
   Year = {2014},
   Month = {August},
   ISSN = {0010-0277},
   url = {http://dx.doi.org/10.1016/j.cognition.2014.04.003},
   Abstract = {We investigate the hypothesis that infant-directed speech is
             a form of hyperspeech, optimized for intelligibility, by
             focusing on vowel devoicing in Japanese. Using a corpus of
             infant-directed and adult-directed Japanese, we show that
             speakers implement high vowel devoicing less often when
             speaking to infants than when speaking to adults, consistent
             with the hyperspeech hypothesis. The same speakers, however,
             increase vowel devoicing in careful, read speech, a speech
             style which might be expected to pattern similarly to
             infant-directed speech. We argue that both infant-directed
             and read speech can be considered listener-oriented speech
             styles-each is optimized for the specific needs of its
             intended listener. We further show that in non-high vowels,
             this trend is reversed: speakers devoice more often in
             infant-directed speech and less often in read speech,
             suggesting that devoicing in the two types of vowels is
             driven by separate mechanisms in Japanese.},
   Doi = {10.1016/j.cognition.2014.04.003},
   Key = {fds252804}
}

@article{fds252808,
   Author = {Mazuka, R and Hasegawa, M and Tsuji, S},
   Title = {Development of non-native vowel discrimination: Improvement
             without exposure.},
   Journal = {Developmental Psychobiology},
   Volume = {56},
   Number = {2},
   Pages = {192--209},
   Year = {2014},
   Month = {February},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/24374789},
   Abstract = {The present study tested Japanese 4.5- and 10-month old
             infants' ability to discriminate three German vowel pairs,
             none of which are contrastive in Japanese, using a visual
             habituation-dishabituation paradigm. Japanese adults'
             discrimination of the same pairs was also tested. The
             results revealed that Japanese 4.5-month old infants
             discriminated the German /bu:k/-/by:k/ contrast, but they
             showed no evidence of discriminating the /bi:k/-/be:k/ or
             /bu:k/-/bo:k/ contrasts. Japanese 10-month old infants, on
             the other hand, discriminated the German /bi:k/-/be:k/
             contrast, while they showed no evidence of discriminating
             the /bu:k/-/by:k/ or /bu:k/-/bo:k/ contrasts. Japanese
             adults, in contrast, were highly accurate in their
             discrimination of all of the pairs. The results indicate
             that discrimination of non-native contrasts is not always
             easy even for young infants, and that their ability to
             discriminate non-native contrasts can improve with age even
             when they receive no exposure to a language in which the
             given contrast is phonemic.},
   Doi = {10.1002/dev.21193},
   Key = {fds252808}
}

@article{fds252800,
   Author = {Matsuda, Y-T and Ueno, K and Cheng, K and Konishi, Y and Mazuka, R and Okanoya, K},
   Title = {Auditory observation of infant-directed speech by mothers:
             experience-dependent interaction between language and
             emotion in the basal ganglia.},
   Journal = {Frontiers in Human Neuroscience},
   Volume = {8},
   Pages = {907},
   Publisher = {Frontiers Research Foundation},
   Year = {2014},
   Month = {January},
   ISSN = {1662-5161},
   url = {http://dx.doi.org/10.3389/fnhum.2014.00907},
   Abstract = {Adults address infants with a special speech register known
             as infant-directed speech (IDS), which conveys both
             linguistic and emotional information through its
             characteristic lexicon and exaggerated prosody (e.g., higher
             pitched, slower, and hyperarticulated). Although caregivers
             are known to regulate the usage of IDS (linguistic and
             emotional components) depending on their child's
             development, the underlying neural substrates of this
             flexible modification are largely unknown. Here, using an
             auditory observation method and functional magnetic
             resonance imaging (fMRI) of four different groups of
             females, we revealed the experience-dependent influence of
             the emotional component on linguistic processing in the
             right caudate nucleus when mothers process IDS: (1)
             non-mothers, who do not use IDS regularly, showed no
             significant difference between IDS and adult-directed speech
             (ADS); (2) mothers with preverbal infants, who primarily use
             the emotional component of IDS, showed the main effect of
             the emotional component of IDS; (3) mothers with toddlers at
             the two-word stage, who use both linguistic and emotional
             components of IDS, showed an interaction between the
             linguistic and emotional components of IDS; and (4) mothers
             with school-age children, who use ADS rather than IDS toward
             their children, showed a tendency toward the main effect of
             ADS. The task that was most comparable to the naturalistic
             categories of IDS (i.e., explicit-language and
             implicit-emotion processing) recruited the right caudate
             nucleus, but it was not recruited in the control, less
             naturalistic condition (explicit-emotion and
             implicit-language processing). Our results indicate that the
             right caudate nucleus processes experience- and
             task-dependent interactions between language and emotion in
             mothers' IDS.},
   Doi = {10.3389/fnhum.2014.00907},
   Key = {fds252800}
}

@article{fds252802,
   Author = {Jincho, N and Feng, G and Mazuka, R},
   Title = {Development of text reading in {Japanese}: An eye movement
             study},
   Journal = {Reading and Writing},
   Volume = {27},
   Number = {8},
   Pages = {1437--1465},
   Year = {2014},
   Month = {January},
   ISSN = {0922-4777},
   url = {http://dx.doi.org/10.1007/s11145-014-9500-9},
   Abstract = {This study examined age-group differences in eye movements
             among third-grade, fifth-grade, and adult Japanese readers.
             In Experiment 1, Japanese children, but not adults, showed a
             longer fixation time on logographic kanji words than on
             phonologically transparent hiragana words. Further, an
             age-group difference was found in the first fixation
             duration on hiragana words but not on kanji words,
             suggesting character-type-dependent reading development in
             Japanese children. Examination of the distributions of
             saccade landing positions revealed that, like adults, both
             third and fifth graders fixated more on kanji than on
             hiragana characters, which suggests that even young children
             utilize the same oculomotor control strategy (the kanji
             targeting strategy) as Japanese adults. In Experiment 2, we
             examined whether the proportion of kanji characters in a
             text affected adult reading performance. Japanese adults
             made more refixations and regressions in texts with a high
             proportion of hiragana characters. The results of both
             experiments suggest that differences between kanji and kana
             affect the reading efficiency of school-age children and
             that maturation of reading skills allows adults to optimize
             their strategy in reading kanji and kana mixed texts. ©
             2014 Springer Science+Business Media Dordrecht.},
   Doi = {10.1007/s11145-014-9500-9},
   Key = {fds252802}
}

@article{fds252816,
   Author = {Arai, M and Mazuka, R},
   Title = {The development of {Japanese} passive syntax as indexed by
             structural priming in comprehension.},
   Journal = {Quarterly Journal of Experimental Psychology
             (2006)},
   Volume = {67},
   Number = {1},
   Pages = {60--78},
   Year = {2014},
   Month = {January},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23663220},
   Abstract = {A number of previous studies reported a phenomenon of
             syntactic priming with young children as evidence for
             cognitive representations required for processing syntactic
             structures. However, it remains unclear how syntactic
             priming reflects children's grammatical competence. The
             current study investigated structural priming of the
             Japanese passive structure with 5- and 6-year-old children
             in a visual-world setting. Our results showed a priming
             effect as anticipatory eye movements to an upcoming referent
             in these children but the effect was significantly stronger
             in magnitude in 6-year-olds than in 5-year-olds.
             Consistently, the responses to comprehension questions
             revealed that 6-year-olds produced a greater number of
             correct answers and more answers using the passive structure
             than 5-year-olds. We also tested adult participants who
             showed even stronger priming than the children. The results
             together revealed that language users with the greater
             linguistic competence with the passives exhibited stronger
             priming, demonstrating a tight relationship between the
             effect of priming and the development of grammatical
             competence. Furthermore, we found that the magnitude of the
             priming effect decreased over time. We interpret these
             results in the light of an error-based learning account. Our
             results also provided evidence for prehead as well as
             head-independent priming.},
   Doi = {10.1080/17470218.2013.790454},
   Key = {fds252816}
}

@article{fds252806,
   Author = {Jincho, N and Feng, G and Mazuka, R},
   Title = {Development of text reading in {Japanese}: an eye movement
             study},
   Journal = {Reading and Writing},
   Volume = {27},
   Number = {8},
   Pages = {1--29},
   Publisher = {Springer Nature},
   Year = {2014},
   ISSN = {0922-4777},
   url = {http://dx.doi.org/10.1007/s11145-014-9500-9},
   Doi = {10.1007/s11145-014-9500-9},
   internal-note = {Apparent duplicate of fds252802 (same DOI; online-first
             pagination 1--29 vs. final 1437--1465). Key retained in case
             it is cited elsewhere.},
   Key = {fds252806}
}

@article{fds252812,
   Author = {Sato, Y and Utsugi, A and Yamane, N and Koizumi, M and Mazuka,
             R},
   Title = {Dialectal differences in hemispheric specialization for
             {Japanese} lexical pitch accent.},
   Journal = {Brain and Language},
   Volume = {127},
   Number = {3},
   Pages = {475--483},
   Year = {2013},
   Month = {December},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/24139706},
   Abstract = {Language experience can alter perceptual abilities and the
             neural specialization for phonological contrasts. Here we
             investigated whether dialectal differences in the lexical
             use of pitch information lead to differences in functional
             lateralization for pitch processing. We measured cortical
             hemodynamic responses to pitch pattern changes in native
             speakers of Standard (Tokyo) Japanese, which has a lexical
             pitch accent system, and native speakers of 'accentless'
             dialects, which do not have any lexical tonal phenomena.
             While the Standard Japanese speakers showed left-dominant
             responses in temporal regions to pitch pattern changes
             within words, the accentless dialects speakers did not show
             such left-dominance. Pitch pattern changes within
             harmonic-complex tones also elicited different brain
             activation patterns between the two groups. These results
             indicate that the neural processing of pitch information
             differs depending on the listener's native dialect, and that
             listeners' linguistic experiences may further affect the
             processing of pitch changes even for non-linguistic
             sounds.},
   Doi = {10.1016/j.bandl.2013.09.008},
   Key = {fds252812}
}

@article{fds252841,
   Author = {Zervakis, J and Mazuka, R},
   Title = {Effect of repeated evaluation and repeated exposure on
             acceptability ratings of sentences.},
   Journal = {Journal of Psycholinguistic Research},
   Volume = {42},
   Number = {6},
   Pages = {505--525},
   Year = {2013},
   Month = {December},
   ISSN = {0090-6905},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23179954},
   Abstract = {This study investigated the effect of repeated evaluation
             and repeated exposure on grammatical acceptability ratings
             for both acceptable and unacceptable sentence types. In
             Experiment 1, subjects in the Experimental group rated
             multiple examples of two ungrammatical sentence types
             (ungrammatical binding and double object with dative-only
             verb), and two difficult to process sentence types
             [center-embedded (2) and garden path ambiguous relative],
             along with matched grammatical/non-difficult sentences,
             before rating a final set of experimental sentences.
             Subjects in the control group rated unrelated sentences
             during the exposure period before rating the experimental
             sentences. Subjects in the Experimental group rated both
             grammatical and ungrammatical sentences as more acceptable
             after repeated evaluation than subjects in the Control
             group. In Experiment 2, subjects answered a comprehension
             question after reading each sentence during the exposure
             period. Subjects in the experimental group rated garden path
             and center-embedded (1) sentences as higher in acceptability
             after comprehension exposure than subjects in the control
             group. The results are consistent with increased fluency of
             comprehension being misattributed as a change in
             acceptability.},
   Doi = {10.1007/s10936-012-9233-3},
   Key = {fds252841}
}

@article{fds252814,
   Author = {Igarashi, Y and Nishikawa, K and Tanaka, K and Mazuka,
             R},
   Title = {Phonological theory informs the analysis of intonational
             exaggeration in {Japanese} infant-directed
             speech.},
   Journal = {The Journal of the Acoustical Society of
             America},
   Volume = {134},
   Number = {2},
   Pages = {1283--1294},
   Year = {2013},
   Month = {August},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23927126},
   Abstract = {To date, the intonation of infant-directed speech (IDS) has
             been analyzed without reference to its phonological
             structure. Intonational phonology should, however, inform
             IDS research, discovering important properties that have
             previously been overlooked. The present study investigated
             "intonational exaggeration" in Japanese IDS using the
             intonational phonological framework. Although intonational
             exaggeration, which is most often measured by pitch-range
             expansion, is one of the best-known characteristics of IDS,
             Japanese has been reported to lack such exaggeration. The
             present results demonstrated that intonational exaggeration
             is in fact present and observed most notably at the location
             of boundary pitch movements, and that the effects of lexical
             pitch accents in the remainder of the utterances
             superficially mask the exaggeration. These results not only
             reveal dynamic aspects of Japanese IDS, but also in turn
             contribute to the theory of intonational phonology,
             suggesting that paralinguistic pitch-range modifications
             most clearly emerge where the intonation system of a
             language allows maximum flexibility in varying intonational
             contours.},
   Doi = {10.1121/1.4812755},
   Key = {fds252814}
}

@article{fds252801,
   Author = {Tajima, K and Tanaka, K and Martin, A and Mazuka,
             R},
   Title = {Is the vowel length contrast in {Japanese} exaggerated in
             infant-directed speech?},
   Journal = {Proceedings of the Annual Conference of the International
             Speech Communication Association, Interspeech},
   Pages = {3211--3215},
   Year = {2013},
   Month = {January},
   ISSN = {2308-457X},
   Abstract = {Vowel length contrasts in Japanese, e.g., chizu "map" vs.
             chiizu "cheese", are cued primarily by vowel duration.
             However, since short and long vowel durations overlap
             considerably in ordinary speech, learning to perceive vowel
             length contrasts is complex. Meanwhile, infant-directed
             speech (IDS) is known to "exaggerate" certain properties of
             adult-directed speech (ADS). If so, then it is possible that
             vowel length contrasts might also be exaggerated in IDS. To
             investigate this, the present study analyzed vowel durations
             in the RIKEN Japanese Mother-Infant Conversation Corpus,
             which contains 11 hours of IDS by 22 mothers talking with
             their 18-to-24-month-old infants, and 3 hours of ADS by the
             same mothers. Results indicated that vowel length contrasts
             were generally not exaggerated in IDS, except at the end of
             prosodic phrases. Furthermore, several factors that
             systematically affected vowel duration in IDS were
             identified, including phrase-final lengthening and
             "non-lexical lengthening", i.e., the lengthening of vowels
             for emphatic or other stylistic purposes. These results
             suggest that vowel duration in Japanese IDS could not only
             potentially facilitate learning of lexical distinctions, but
             also signal phrase boundaries, emphasis, or other
             communicative functions. Copyright © 2013
             ISCA.},
   Key = {fds252801}
}

@article{fds252813,
   Author = {Gervain, J and Sebasti{\'a}n-Gall{\'e}s, N and D{\'\i}az, B and Laka, I and Mazuka, R and Yamane, N and Nespor, M and Mehler,
             J},
   Title = {Word frequency cues word order in adults: cross-linguistic
             evidence.},
   Journal = {Frontiers in Psychology},
   Volume = {4},
   Pages = {689},
   Year = {2013},
   Month = {January},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/24106483},
   Abstract = {One universal feature of human languages is the division
             between grammatical functors and content words. From a
             learnability point of view, functors might provide entry
             points or anchors into the syntactic structure of utterances
             due to their high frequency. Despite its potentially
             universal scope, this hypothesis has not yet been tested on
             typologically different languages and on populations of
             different ages. Here we report a corpus study and an
             artificial grammar learning experiment testing the anchoring
             hypothesis in Basque, Japanese, French, and Italian adults.
             We show that adults are sensitive to the distribution of
             functors in their native language and use them when learning
             new linguistic material. However, compared to infants'
             performance on a similar task, adults exhibit a slightly
             different behavior, matching the frequency distributions of
             their native language more closely than infants do. This
             finding bears on the issue of the continuity of language
             learning mechanisms.},
   Doi = {10.3389/fpsyg.2013.00689},
   Key = {fds252813}
}

@article{fds252838,
   Author = {Bion, RAH and Miyazawa, K and Kikuchi, H and Mazuka,
             R},
   Title = {Learning phonemic vowel length from naturalistic recordings
             of {Japanese} infant-directed speech.},
   Journal = {{PLoS ONE}},
   Volume = {8},
   Number = {2},
   Pages = {e51594},
   Year = {2013},
   Month = jan,
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23437036},
   Abstract = {In Japanese, vowel duration can distinguish the meaning of
             words. In order for infants to learn this phonemic contrast
             using simple distributional analyses, there should be
             reliable differences in the duration of short and long
             vowels, and the frequency distribution of vowels must make
             these differences salient enough in the input. In this
             study, we evaluate these requirements of phonemic learning
             by analyzing the duration of vowels from over 11 hours of
             Japanese infant-directed speech. We found that long vowels
             are substantially longer than short vowels in the input
             directed to infants, for each of the five oral vowels.
             However, we also found that learning phonemic length from
             the overall distribution of vowel duration is not going to
             be easy for a simple distributional learner, because of the
             large base-rate effect (i.e., 94% of vowels are short), and
             because of the many factors that influence vowel duration
             (e.g., intonational phrase boundaries, word boundaries, and
             vowel height). Therefore, a successful learner would need to
             take into account additional factors such as prosodic and
             lexical cues in order to discover that duration can contrast
             the meaning of words in Japanese. These findings highlight
             the importance of taking into account the naturalistic
             distributions of lexicons and acoustic cues when modeling
             early phonemic learning.},
   Doi = {10.1371/journal.pone.0051594},
   Key = {fds252838}
}

@article{fds252815,
   Author = {Tajima, K and Tanaka, K and Martin, A and Mazuka,
             R},
   Title = {Is the mora rhythm of {Japanese} more strongly observed in
             infant-directed speech than in adult-directed
             speech?},
   Journal = {Proceedings of Meetings on Acoustics},
   Volume = {19},
   Number = {5},
   Pages = {3341},
   Year = {2013},
   ISSN = {1939-800X},
   url = {http://dx.doi.org/10.1121/1.4800508},
   Abstract = {Japanese has traditionally been called "mora-timed", but
             studies have shown that this intuition is based not on
             durational tendencies but rather on phonological, structural
             factors in the language. Meanwhile, infant-directed speech
             (IDS) is said to "exaggerate" certain properties of
             adult-directed speech (ADS), including rhythm. If so, then
             it is possible that the mora rhythm of Japanese is more
             strongly observed in IDS than ADS. To investigate this
             possibility, the present study utilized the RIKEN Japanese
             Mother-Infant Conversation Corpus, which contains
             approximately 11 hours of IDS by 22 mothers talking with
             their 18-to-24-month-old infants, and 3 hours of ADS by the
             same mothers. Results from durational analyses showed that
             aspects of mora rhythm, such as the distinction between
             phonemically short and long vowels and singleton and
             geminate consonants, and the tendency toward isochrony of
             moras, were not greater in IDS than ADS. Mora duration in
             IDS was highly variable, partly stemming from greater
             phrase-final lengthening and non-phonemic, emphatic
             lengthening. Results from structural analysis, however,
             showed that non-CV moras such as nasal moras that
             characterize Japanese rhythm occurred more frequently in IDS
             than ADS. These results suggest that even in IDS, Japanese
             rhythm is manifested structurally, not durationally. © 2013
             Acoustical Society of America.},
   Doi = {10.1121/1.4800508},
   Key = {fds252815}
}

@article{fds252817,
   Author = {Saikachi, Y and Kitahara, M and Nishikawa, K and Kanato, A and Mazuka,
             R},
   Title = {The {F0} fall delay of lexical pitch accent in {Japanese}
             Infant-directed speech},
   Journal = {13th Annual Conference of the International Speech
             Communication Association 2012, Interspeech
             2012},
   Volume = {3},
   Pages = {2485--2488},
   Publisher = {ISCA},
   Year = {2012},
   Month = dec,
   ISBN = {9781622767595},
   url = {http://www.isca-speech.org/archive/interspeech_2012},
   Abstract = {The current study examined the acoustic modifications of the
             lexical pitch accent in Tokyo Japanese infant-directed
             speech (IDS), with the focus on the F0 fall delay, where the
             alignment of the F0 turning points associated with pitch
             accents were delayed with respect to the accented mora. The
             RIKEN Mother- Infant Conversation Corpus (R-JMICC) [1]
             produced by 21 mothers from Tokyo area, was used to
             investigate the alignment of the F0 turning points.
             Two-piece linear regression was used to locate the turning
             points and the frequency of F0 fall delay was computed in
             IDS and in adult-directed speech (ADS). The results revealed
             that the frequency of F0 fall delay depended on the syllable
             structures of the accented syllable as well as the prosodic
             conditions (the presence of the boundary pitch movements and
             non-lexical lengthening) typically observed in Japanese IDS.
             We found significantly more frequent F0 fall delay in IDS
             compared to ADS, when the prosodic conditions were taken
             into account. The results indicate that the language
             specific prosodic structure should be considered in order to
             characterize the F0 fall delay of lexical pitch accents in
             IDS.},
   Key = {fds252817}
}

@article{fds252840,
   Author = {Tsuji, S and Gomez, NG and Medina, V and Nazzi, T and Mazuka,
             R},
   Title = {The labial-coronal effect revisited: {Japanese} adults say
             pata, but hear tapa.},
   Journal = {Cognition},
   Volume = {125},
   Number = {3},
   Pages = {413--428},
   Year = {2012},
   Month = dec,
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22921188},
   Abstract = {The labial-coronal effect has originally been described as a
             bias to initiate a word with a labial consonant-vowel-coronal
             consonant (LC) sequence. This bias has been explained with
             constraints on the human speech production system, and its
             perceptual correlates have motivated the suggestion of a
             perception-production link. However, previous studies
             exclusively considered languages in which LC sequences are
             globally more frequent than their counterpart. The current
             study examined the LC bias in speakers of Japanese, a
             language that has been claimed to possess more CL than LC
             sequences. We first conducted an analysis of Japanese
             corpora that qualified this claim, and identified a subgroup
             of consonants (plosives) exhibiting a CL bias. Second,
             focusing on this subgroup of consonants, we found diverging
             results for production and perception such that Japanese
             speakers exhibited an articulatory LC bias, but a perceptual
             CL bias. The CL perceptual bias, however, was modulated by
             language of presentation, and was only present for stimuli
             recorded by a Japanese, but not a French, speaker. A further
             experiment with native speakers of French showed the
             opposite effect, with an LC bias for French stimuli only.
             Overall, we find support for a universal, articulatory
             motivated LC bias in production, supporting a motor
             explanation of the LC effect, while perceptual biases are
             influenced by distributional frequencies of the native
             language.},
   Doi = {10.1016/j.cognition.2012.07.017},
   Key = {fds252840}
}

@article{fds252835,
   Author = {Minai, U and Jincho, N and Yamane, N and Mazuka, R},
   Title = {What hinders child semantic computation: children's
             universal quantification and the development of cognitive
             control.},
   Journal = {Journal of Child Language},
   Volume = {39},
   Number = {5},
   Pages = {919--956},
   Year = {2012},
   Month = nov,
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22182242},
   Abstract = {Recent studies on the acquisition of semantics have argued
             that knowledge of the universal quantifier is adult-like
             throughout development. However, there are domains where
             children still exhibit non-adult-like universal
             quantification, and arguments for the early mastery of
             relevant semantic knowledge do not explain what causes such
             non-adult-like interpretations. The present study
             investigates Japanese four- and five-year-old children's
             atypical universal quantification in light of the
             development of cognitive control. We hypothesized that
             children's still-developing cognitive control contributes to
             their atypical universal quantification. Using a combined
             eye-tracking and interpretation task together with a
             non-linguistic measure of cognitive control, we revealed a
             link between the achievement of adult-like universal
             quantification and the development of flexible
             perspective-switch. We argue that the development of
             cognitive control is one of the factors that contribute to
             children's processing of semantics.},
   Doi = {10.1017/s0305000911000316},
   Key = {fds252835}
}

@article{fds304695,
   Author = {Nakamura, C and Arai, M and Mazuka, R},
   Title = {Immediate use of prosody and context in predicting a
             syntactic structure.},
   Journal = {Cognition},
   Volume = {125},
   Number = {2},
   Pages = {317--323},
   Year = {2012},
   Month = nov,
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22901508},
   Abstract = {Numerous studies have reported an effect of prosodic
             information on parsing but whether prosody can impact even
             the initial parsing decision is still not evident. In a
             visual world eye-tracking experiment, we investigated the
             influence of contrastive intonation and visual context on
             processing temporarily ambiguous relative clause sentences
             in Japanese. Our results showed that listeners used the
             prosodic cue to make a structural prediction before hearing
             disambiguating information. Importantly, the effect was
             limited to cases where the visual scene provided an
             appropriate context for the prosodic cue, thus eliminating
             the explanation that listeners have simply associated marked
             prosodic information with a less frequent structure.
             Furthermore, the influence of the prosodic information was
             also evident following disambiguating information, in a way
             that reflected the initial analysis. The current study
             demonstrates that prosody, when provided with an appropriate
             context, influences the initial syntactic analysis and also
             the subsequent cost at disambiguating information. The
             results also provide first evidence for pre-head structural
             prediction driven by prosodic and contextual information
             with a head-final construction.},
   Doi = {10.1016/j.cognition.2012.07.016},
   Key = {fds304695}
}

@article{fds252836,
   Author = {Sato, Y and Kato, M and Mazuka, R},
   Title = {Development of single/geminate obstruent discrimination by
             {Japanese} infants: early integration of durational and
             nondurational cues.},
   Journal = {Developmental Psychology},
   Volume = {48},
   Number = {1},
   Pages = {18--34},
   Year = {2012},
   Month = jan,
   url = {http://www.ncbi.nlm.nih.gov/pubmed/21967561},
   Abstract = {The Japanese language has single/geminate obstruents
             characterized by durational difference in closure/frication
             as part of the phonemic repertoire used to distinguish word
             meanings. We first evaluated infants' abilities to
             discriminate naturally uttered single/geminate obstruents
             (/pata/ and /patta/) using the visual habituation-dishabituation
             method. The results revealed that 9.5-month-old Japanese
             infants were able to make this discrimination, t(21) =
             2.119, p = .046, paired t test, whereas 4-month-olds were
             not, t(25) = 0.395, p = .696, paired t test. To examine how
             acoustic correlates (covarying cues) are associated with the
             contrast discrimination, we tested Japanese infants at 9.5
             and 11.5 months of age with 3 combinations of natural and
             manipulated stimuli. The 11.5-month-olds were able to
             discriminate the naturally uttered pair (/pata/ vs.
             /patta/), t(20) = 4.680, p < .000, paired t test. Neither
             group discriminated the natural /patta/ from the manipulated
             /pata/ created from natural /patta/ tokens: For
             9.5-month-olds, t(23) = 0.754, p = .458; for
             11.5-month-olds, t(27) = 0.789, p = .437, paired t tests.
             Only the 11.5-month-olds discriminated the natural /pata/
             and the manipulated /patta/ created from /pata/ tokens: For
             9.5-month-olds, t(24) = 0.114, p = .910; for
             11.5-month-olds, t(23) = 2.244, p = .035, paired t tests.
             These results suggest that Japanese infants acquire a
             sensitivity to contrasts of single/geminate obstruents by
             9.5 months of age and that certain cues that covary with
             closure length either facilitate or interfere with contrast
             discrimination under particular conditions.},
   Doi = {10.1037/a0025528},
   Key = {fds252836}
}

@article{fds252837,
   Author = {Ito, K and Jincho, N and Minai, U and Yamane, N and Mazuka,
             R},
   Title = {Intonation facilitates contrast resolution: Evidence from
             {Japanese} adults and 6-year olds},
   Journal = {Journal of Memory and Language},
   Volume = {66},
   Number = {1},
   Pages = {265--284},
   Publisher = {Elsevier BV},
   Year = {2012},
   Month = jan,
   ISSN = {0749-596X},
   url = {http://dx.doi.org/10.1016/j.jml.2011.09.002},
   Abstract = {Two eye-tracking experiments tested how pitch prominence on
             a prenominal adjective affects contrast resolution in
             Japanese adult and 6-year old listeners. Participants
             located two animals in succession on displays with multiple
             colored animals. In Experiment 1, adults' fixations to the
             contrastive target (pink cat → GREEN cat) were facilitated
             by a pitch expansion on the adjective while infelicitous
             pitch expansion (purple rabbit → ORANGE monkey) led to a
             garden-path effect, i.e., frequent fixations to the
             incorrect target (orange rabbit). In 6-year olds, only the
             facilitation effect surfaced. Hypothesizing that the
             interval between the two questions may not have given enough
             time for children to overcome their tendency to perseverate
             on the first target, Experiment 2 used longer intervals and
             confirmed a garden-path effect in 6-year olds. These results
             demonstrate that Japanese 6-year olds can make use of
             contrast-marking pitch prominence when time allows an
             establishment of proper discourse representation. © 2011
             Elsevier Inc.},
   Doi = {10.1016/j.jml.2011.09.002},
   Key = {fds252837}
}

@article{fds252833,
   Author = {Mazuka, R},
   Title = {“Nyuji no onsei hattatsu” (In {Japanese}). (Development of
             infant speech perception)},
   Journal = {The Journal of the Acoustical Society of
             Japan},
   Volume = {68},
   Number = {5},
   Pages = {241--247},
   Year = {2012},
   Key = {fds252833}
}

@article{fds252834,
   Author = {Nakamura, C and Arai, M and Mazuka, R},
   Title = {Immediate use of prosody and context in predicting a
             syntactic structure},
   Journal = {Cognition},
   Volume = {125},
   Number = {2},
   Pages = {317--323},
   Year = {2012},
   Month = nov,
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22901508},
   internal-note = {Appears to duplicate entry fds304695 (same work, same
             DOI). Issue/pages originally read 125(3), 413-428, which
             belong to the Tsuji et al. article (fds252840); corrected
             here to 125(2), 317--323 per the DOI. Consider removing one
             of the duplicate entries.},
   Abstract = {Numerous studies have reported an effect of prosodic
             information on parsing but whether prosody can impact even
             the initial parsing decision is still not evident. In a
             visual world eye-tracking experiment, we investigated the
             influence of contrastive intonation and visual context on
             processing temporarily ambiguous relative clause sentences
             in Japanese. Our results showed that listeners used the
             prosodic cue to make a structural prediction before hearing
             disambiguating information. Importantly, the effect was
             limited to cases where the visual scene provided an
             appropriate context for the prosodic cue, thus eliminating
             the explanation that listeners have simply associated marked
             prosodic information with a less frequent structure.
             Furthermore, the influence of the prosodic information was
             also evident following disambiguating information, in a way
             that reflected the initial analysis. The current study
             demonstrates that prosody, when provided with an appropriate
             context, influences the initial syntactic analysis and also
             the subsequent cost at disambiguating information. The
             results also provide first evidence for pre-head structural
             prediction driven by prosodic and contextual information
             with a head-final construction.},
   Doi = {10.1016/j.cognition.2012.07.016},
   Key = {fds252834}
}

@article{fds252832,
   Author = {Kouki, M and Hideaki, M and Hideaki, K and Reiko,
             M},
   internal-note = {Author names look reversed (given name in surname
             position, surname reduced to an initial); presumably
             includes Miyazawa, Kouki; Kikuchi, Hideaki; and Mazuka,
             Reiko — verify against the Interspeech 2011
             proceedings.},
   Title = {The multi timescale phoneme acquisition model of the
             self-organizing based on the dynamic features},
   Journal = {Proceedings of the Annual Conference of the International
             Speech Communication Association, Interspeech},
   Pages = {749--752},
   Year = {2011},
   Month = dec,
   ISSN = {1990-9772},
   Abstract = {It is unclear as to how infants learn the acoustic
             expression of each phoneme of their native languages. In
             recent studies, researchers have inspected phoneme
             acquisition by using a computational model. However, these
             studies have used a limited vocabulary as input and do not
             handle a continuous speech that is almost comparable to a
             natural environment. Therefore, we use a natural continuous
             speech and build a self-organization model that simulates
             the cognitive ability of the humans, and we analyze the
             quality and quantity of the speech information that is
             necessary for the acquisition of the native phoneme system.
             Our model is designed to learn values of the acoustic
             features of a continuous speech and to estimate the number
             and boundaries of the phoneme categories without using
             explicit instructions. In a recent study, our model could
             acquire the detailed vowels of the input language. In this
             study, we examined the mechanism necessary for an infant to
             acquire all the phonemes of a language, including
             consonants. In natural speech, vowels have a stationary
             feature; hence, our recent model is suitable for learning
             them. However, learning consonants through the past model is
             difficult because most consonants have more dynamic features
             than vowels. To solve this problem, we designed a method to
             separate "stable" and "dynamic" speech patterns using a
             feature-extraction method based on the auditory expressions
             used by human beings. Using this method, we showed that the
             acquisition of an unstable phoneme was possible without the
             use of instructions. Copyright © 2011 ISCA.},
   Key = {fds252832}
}

@article{fds252845,
   Author = {Mazuka, R and Cao, Y and Dupoux, E and Christophe,
             A},
   Title = {The development of a phonological illusion: a
             cross-linguistic study with {Japanese} and {French}
             infants.},
   Journal = {Developmental Science},
   Volume = {14},
   Number = {4},
   Pages = {693--699},
   Year = {2011},
   Month = jul,
   url = {http://www.ncbi.nlm.nih.gov/pubmed/21676090},
   Abstract = {In adults, native language phonology has strong perceptual
             effects. Previous work has shown that Japanese speakers,
             unlike French speakers, break up illegal sequences of
             consonants with illusory vowels: they report hearing abna as
             abuna. To study the development of phonological grammar, we
             compared Japanese and French infants in a discrimination
             task. In Experiment 1, we observed that 14-month-old
             Japanese infants, in contrast to French infants, failed to
             discriminate phonetically varied sets of abna-type and
             abuna-type stimuli. In Experiment 2, 8-month-old French and
             Japanese did not differ significantly from each other. In
             Experiment 3, we found that, like adults, Japanese infants
             can discriminate abna from abuna when phonetic variability
             is reduced (single item). These results show that the
             phonologically induced /u/ illusion is already experienced
             by Japanese infants at the age of 14 months. Hence, before
             having acquired many words of their language, they have
             grasped enough of their native phonological grammar to
             constrain their perception of speech sound
             sequences.},
   Doi = {10.1111/j.1467-7687.2010.01015.x},
   Key = {fds252845}
}

@article{fds252844,
   Author = {Minagawa-Kawai, Y and van der Lely, H and Ramus, F and Sato, Y and Mazuka, R and Dupoux, E},
   Title = {Optical brain imaging reveals general auditory and
             language-specific processing in early infant
             development.},
   Journal = {Cerebral Cortex},
   Volume = {21},
   Number = {2},
   Pages = {254--261},
   Year = {2011},
   Month = feb,
   url = {http://www.ncbi.nlm.nih.gov/pubmed/20497946},
   Abstract = {This study uses near-infrared spectroscopy in young infants
             in order to elucidate the nature of functional cerebral
             processing for speech. Previous imaging studies of infants'
             speech perception revealed left-lateralized responses to
             native language. However, it is unclear if these activations
             were due to language per se rather than to some low-level
             acoustic correlate of spoken language. Here we compare
             native (L1) and non-native (L2) languages with 3 different
             nonspeech conditions including emotional voices, monkey
             calls, and phase scrambled sounds that provide more
             stringent controls. Hemodynamic responses to these stimuli
             were measured in the temporal areas of Japanese 4
             month-olds. The results show clear left-lateralized
             responses to speech, prominently to L1, as opposed to
             various activation patterns in the nonspeech conditions.
             Furthermore, implementing a new analysis method designed for
             infants, we discovered a slower hemodynamic time course in
             awake infants. Our results are largely explained by
             signal-driven auditory processing. However, stronger
             activations to L1 than to L2 indicate a language-specific
             neural factor that modulates these responses. This study is
             the first to discover a significantly higher sensitivity to
             L1 in 4 month-olds and reveals a neural precursor of the
             functional specialization for the higher cognitive
             network.},
   Doi = {10.1093/cercor/bhq082},
   Key = {fds252844}
}

@article{fds252843,
   Author = {Sato, Y and Mori, K and Koizumi, T and Minagawa-Kawai, Y and Tanaka, A and Ozawa, E and Wakaba, Y and Mazuka, R},
   Title = {Functional lateralization of speech processing in adults and
             children who stutter.},
   Journal = {Frontiers in Psychology},
   Volume = {2},
   Pages = {70},
   Year = {2011},
   Month = jan,
   url = {http://www.ncbi.nlm.nih.gov/pubmed/21687442},
   Abstract = {Developmental stuttering is a speech disorder in fluency
             characterized by repetitions, prolongations, and silent
             blocks, especially in the initial parts of utterances.
             Although their symptoms are motor related, people who
             stutter show abnormal patterns of cerebral hemispheric
             dominance in both anterior and posterior language areas. It
             is unknown whether the abnormal functional lateralization in
             the posterior language area starts during childhood or
             emerges as a consequence of many years of stuttering. In
             order to address this issue, we measured the lateralization
             of hemodynamic responses in the auditory cortex during
             auditory speech processing in adults and children who
             stutter, including preschoolers, with near-infrared
             spectroscopy. We used the analysis-resynthesis technique to
             prepare two types of stimuli: (i) a phonemic contrast
             embedded in Japanese spoken words (/itta/ vs. /itte/) and
             (ii) a prosodic contrast (/itta/ vs. /itta?/). In the
             baseline blocks, only /itta/ tokens were presented. In
             phonemic contrast blocks, /itta/ and /itte/ tokens were
             presented pseudo-randomly, and /itta/ and /itta?/ tokens in
             prosodic contrast blocks. In adults and children who do not
             stutter, there was a clear left-hemispheric advantage for
             the phonemic contrast compared to the prosodic contrast.
             Adults and children who stutter, however, showed no
             significant difference between the two stimulus conditions.
             A subject-by-subject analysis revealed that not a single
             subject who stutters showed a left advantage in the phonemic
             contrast over the prosodic contrast condition. These results
             indicate that the functional lateralization for auditory
             speech processing is in disarray among those who stutter,
             even at preschool age. These results shed light on the
             neural pathophysiology of developmental stuttering.},
   Doi = {10.3389/fpsyg.2011.00070},
   Key = {fds252843}
}

@article{fds252846,
   Author = {Matsuda, Y-T and Ueno, K and Waggoner, RA and Erickson, D and Shimura,
             Y and Tanaka, K and Cheng, K and Mazuka, R},
   Title = {Processing of infant-directed speech by adults.},
   Journal = {{NeuroImage}},
   Volume = {54},
   Number = {1},
   Pages = {611--621},
   Year = {2011},
   Month = jan,
   url = {http://www.ncbi.nlm.nih.gov/pubmed/20691794},
   Abstract = {Adults typically address infants in a special speech mode
             called infant-directed speech (IDS). IDS is characterized by
             a special prosody (i.e., higher pitched, slower and
             hyperarticulated) and a special lexicon ("baby talk"). Here
             we investigated which areas of the adult brain are involved
             in processing IDS, which aspects of IDS (prosodic or
             lexical) are processed, to what extent the experience of
             being a parent affects the way adults process IDS, and the
             effects of gender and personality on IDS processing. Using
             functional magnetic resonance imaging, we found that mothers
             with preverbal infants showed enhanced activation in the
             auditory dorsal pathway of the language areas, regardless of
             whether they listened to the prosodic or lexical component
             of IDS. We also found that extroverted mothers showed higher
             cortical activation in speech-related motor areas than did
             mothers with lower extroverted personality scores. Increased
             cortical activation levels were not found for fathers,
             non-parents, or mothers with older children.},
   Doi = {10.1016/j.neuroimage.2010.07.072},
   Key = {fds252846}
}

@article{fds252828,
   Author = {Utsugi, A and Koizumi, M and Mazuka, R},
   Title = {A robust method to detect dialectal differences in the
             perception of lexical pitch accent},
   Journal = {20th International Congress on Acoustics 2010, {ICA} 2010
             Incorporating Proceedings of the 2010 Annual Conference of
             the Australian Acoustical Society},
   Volume = {5},
   Pages = {3689--3696},
   Year = {2010},
   Month = dec,
   Abstract = {While Standard (Tokyo) Japanese has a lexical tonal system
             known as 'lexical pitch accent', there are some varieties of
             Japanese, called 'accentless' dialects, which do not have
             any lexical tonal phenomena. We investigated the differences
             in the perception of lexical pitch accent between the
             speakers of the accentless dialect and those of Standard
             Japanese, and the robustness of two approaches to
             investigate such dialectal differences. We conducted two
             experiments: categorical perception and sequence recall
             experiments. The former is an approach that has been
             traditionally employed to study the perception of
             phonological contrasts. The latter is a more recent method
             employed in studies of 'stress-deafness' in French by Dupoux
             and his colleagues, in which participants listen to
             sequences of several nonsense words and answer the order of
             the words. The results of the categorical perception
             experiment showed no clear dialectal differences. On the
             other hand, the results of the sequence recall task showed
             that the scores of the 'accentless' group were clearly lower
             than those of control (Standard Japanese) participants in
             the discrimination of nonsense words whose pitch accent
             differences corresponded to lexical differences in Standard
             Japanese phonology. Thus, it is concluded that the latter
             experimental approach is more robust to study dialectal
             differences in pitch accent perception than the
             former.},
   Key = {fds252828}
}

@article{fds252829,
   Author = {Miyazawa, K and Kikuchi, H and Mazuka, R},
   Title = {Unsupervised learning of vowels from continuous speech based
             on self-organized phoneme acquisition model},
   Journal = {Proceedings of the 11th Annual Conference of the
             International Speech Communication Association, Interspeech
             2010},
   Pages = {2914-2917},
   Year = {2010},
   Month = {December},
   Abstract = {All normal humans can acquire their native phoneme systems
             simply by living in their native language environment.
             However, it is unclear as to how infants learn the acoustic
             expression of each phoneme of their native languages. In
             recent studies, researchers have inspected phoneme
             acquisition by using a computational model. However, these
             studies have used read speech that has a limited vocabulary
             as input and do not handle a continuous speech that is
             almost comparable to a natural environment. Therefore, in
             this study, we use natural continuous speech and build a
             self-organization model that simulates the cognitive ability
             of the humans, and we analyze the quality and quantity of
             the speech information that is necessary for the acquisition
             of the native vowel system. Our model is designed to learn
             values of the acoustic characteristic of a natural
             continuous speech and to estimate the number and boundaries
             of the vowel categories without using explicit instructions.
             In the simulation trial, we investigate the relationship
             between the quantity of learning and the accuracy for the
             vowels in a single Japanese speaker's natural speech. As a
             result, it is found that the vowel recognition accuracy of
             our model is comparable to that of an adult. © 2010
             ISCA.},
   Key = {fds252829}
}

@article{fds252830,
   Author = {Mazuka, R},
   Title = {Learning the sound system of Japanese: What does it tell us
             about language acquisition?},
   Journal = {20th International Congress on Acoustics 2010, Ica 2010
             Incorporating Proceedings of the 2010 Annual Conference of
             the Australian Acoustical Society},
   Volume = {5},
   Pages = {4186-4193},
   Year = {2010},
   Month = {December},
   Abstract = {Infants learn much about the phonology of their own language
             during the first year of their lives. To date, however, the
             vast majority of the research on infant speech perception
             has been carried out with infants learning English and other
             European languages, and we know very little about how
             infants learning other languages learn the sound system of
             their languages. The phonological characteristics of
             Japanese differ from English and other European languages in
             important ways, and investigation of its acquisition has a
             potential of shedding important light onto our understanding
             of phonological acquisition. In this paper, we present data
             from Japanese to exemplify this point:
             acquisition of mora-timed rhythm, edge-prominent prosody,
             lexical pitch-accent and segmental distribution.},
   Key = {fds252830}
}

@article{fds252842,
   Author = {Sato, Y and Sogabe, Y and Mazuka, R},
   Title = {Development of hemispheric specialization for lexical
             pitch-accent in Japanese infants.},
   Journal = {Journal of Cognitive Neuroscience},
   Volume = {22},
   Number = {11},
   Pages = {2503-2513},
   Year = {2010},
   Month = {November},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/19925204},
   Abstract = {Infants' speech perception abilities change through the
             first year of life, from broad sensitivity to a wide range
             of speech contrasts to becoming more finely attuned to their
             native language. What remains unclear, however, is how this
             perceptual change relates to brain responses to native
             language contrasts in terms of the functional specialization
             of the left and right hemispheres. Here, to elucidate the
             developmental changes in functional lateralization
             accompanying this perceptual change, we conducted two
             experiments on Japanese infants using Japanese lexical
             pitch-accent, which changes word meanings with the pitch
             pattern within words. In the first behavioral experiment,
             using visual habituation, we confirmed that infants at both
             4 and 10 months have sensitivities to the lexical
             pitch-accent pattern change embedded in disyllabic words. In
             the second experiment, near-infrared spectroscopy was used
             to measure cortical hemodynamic responses in the left and
             right hemispheres to the same lexical pitch-accent pattern
             changes and their pure tone counterparts. We found that
             brain responses to the pitch change within words differed
             between 4- and 10-month-old infants in terms of functional
             lateralization: Left hemisphere dominance for the perception
             of the pitch change embedded in words was seen only in the
             10-month-olds. These results suggest that the perceptual
             change in Japanese lexical pitch-accent may be related to a
             shift in functional lateralization from bilateral to left
             hemisphere dominance.},
   Doi = {10.1162/jocn.2009.21377},
   Key = {fds252842}
}

@article{fds252847,
   Author = {Yoshida, KA and Iversen, JR and Patel, AD and Mazuka, R and Nito, H and Gervain, J and Werker, JF},
   Title = {The development of perceptual grouping biases in infancy: a
             Japanese-English cross-linguistic study.},
   Journal = {Cognition},
   Volume = {115},
   Number = {2},
   Pages = {356-361},
   Year = {2010},
   Month = {May},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/20144456},
   Abstract = {Perceptual grouping has traditionally been thought to be
             governed by innate, universal principles. However, recent
             work has found differences in Japanese and English speakers'
             non-linguistic perceptual grouping, implicating language in
             non-linguistic perceptual processes (Iversen, Patel, &
             Ohgushi, 2008). Two experiments test Japanese- and
             English-learning infants of 5-6 and 7-8 months of age to
             explore the development of grouping preferences. At 5-6
             months, neither the Japanese nor the English infants
             revealed any systematic perceptual biases. However, by 7-8
             months, the same age as when linguistic phrasal grouping
             develops, infants developed non-linguistic grouping
             preferences consistent with their language's structure (and
             the grouping biases found in adulthood). These results
             reveal an early difference in non-linguistic perception
             between infants growing up in different language
             environments. The possibility that infants' linguistic
             phrasal grouping is bootstrapped by abstract perceptual
             principles is discussed.},
   Doi = {10.1016/j.cognition.2010.01.005},
   Key = {fds252847}
}

@article{fds252839,
   Author = {Sato, Y and Sogabe, Y and Mazuka, R},
   Title = {Discrimination of phonemic vowel length by Japanese
             infants.},
   Journal = {Developmental Psychology},
   Volume = {46},
   Number = {1},
   Pages = {106-119},
   Year = {2010},
   Month = {January},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/20053010},
   Abstract = {Japanese has a vowel duration contrast as one component of
             its language-specific phonemic repertory to distinguish word
             meanings. It is not clear, however, how a sensitivity to
             vowel duration can develop in a linguistic context. In the
             present study, using the visual habituation-dishabituation
             method, the authors evaluated infants' abilities to
             discriminate Japanese long and short vowels embedded in
             two-syllable words (/mana/ vs. /ma:na/). The results
             revealed that 4-month-old Japanese infants (n = 32) failed
             to discriminate the contrast (p = .676), whereas
             9.5-month-olds (n = 33) showed the discrimination ability (p
             = .014). The 7.5-month-olds did not show positive evidence
             to discriminate the contrast either when the edited stimuli
             were used (n = 33; p = .275) or when naturally uttered
             stimuli were used (n = 33; p = .189). By contrast, the
             4-month-olds (n = 24) showed sensitivity to a vowel quality
             change (/mana/ vs. /mina/; p = .034). These results indicate
             that Japanese infants acquire sensitivity to long-short
             vowel contrasts between 7.5 and 9.5 months of age and that
             the developmental course of the phonemic category by the
             durational changes is different from that by the quality
             change.},
   Doi = {10.1037/a0016718},
   Key = {fds252839}
}

@article{fds335697,
   Author = {Horie, R and Mazuka, R},
   Title = {Learning variation of deterministic chaos in auditory
             signals},
   Journal = {Neuroscience Research},
   Volume = {68},
   Pages = {e407-e407},
   Publisher = {Elsevier BV},
   Year = {2010},
   Month = {January},
   url = {http://dx.doi.org/10.1016/j.neures.2010.07.1804},
   Doi = {10.1016/j.neures.2010.07.1804},
   Key = {fds335697}
}

@article{fds252851,
   Author = {Mazuka, R and Jincho, N and Oishi, H},
   Title = {Development of executive control and language
             processing},
   Journal = {Language and Linguistics Compass},
   Volume = {3},
   Number = {1},
   Pages = {59-89},
   Publisher = {WILEY},
   Year = {2009},
   Month = {January},
   ISSN = {1749-818X},
   url = {http://dx.doi.org/10.1111/j.1749-818X.2008.00102.x},
   Abstract = {Research in executive function development has shown that
             children have poor control of inhibition functions,
             including the inhibition of prepotent responses, control of
             attention, and flexibility at rule-shifting. To date, links
             between the development of executive function and children's
             language development have not been investigated explicitly.
             Yet, recent studies on children's sentence processing report
             that children tend to perseverate during sentence
             processing. We argue that such perseveration may be due to
             immature executive function. © 2009 Blackwell Publishing
             Ltd.},
   Doi = {10.1111/j.1749-818X.2008.00102.x},
   Key = {fds252851}
}

@article{fds252848,
   Author = {Mazuka, R},
   Title = {Acquisition of linguistic-rhythm and prosodic bootstrapping
             hypothesis (In Japanese; Gengorizumu no kakutoku to inritsu
             ni yoru bootosutorappingukasetsu)},
   Journal = {Japanese Journal of Phonology},
   Volume = {13},
   Number = {3},
   Pages = {19-32},
   Year = {2009},
   Abstract = {In the Rhythm-based Prosodic Bootstrapping Hypothesis, it is
             proposed that infants' early sensitivity to the rhythmic
             properties of a language will enable them to adopt a
             metrical speech segmentation strategy appropriate for their
             language. The proposal was borne out of recent research in
             infant speech perception which demonstrated that young
             infants are sensitive to prosodic properties of language
             that are relevant to linguistic rhythm. Systematic
             evaluation of the literature revealed that while the
             acquisition of stress-timed languages appears to fit the
             prediction of the bootstrapping hypothesis, data from the
             other languages are not so clear. Japanese data, in
             particular, is not consistent with the hypothesis. It is
             argued that the rhythm of a language may be salient for
             infants in all languages, but how this sensitivity is linked
             to other aspects of language acquisition may differ for the
             three rhythm types.},
   Key = {fds252848}
}

@article{fds252850,
   Author = {Gervain, J and Nespor, M and Mazuka, R and Horie, R and Mehler,
             J},
   Title = {Bootstrapping word order in prelexical infants: a
             Japanese-Italian cross-linguistic study.},
   Journal = {Cognitive Psychology},
   Volume = {57},
   Number = {1},
   Pages = {56-74},
   Year = {2008},
   Month = {August},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18241850},
   Abstract = {Learning word order is one of the earliest feats infants
             accomplish during language acquisition [Brown, R. (1973). A
             first language: The early stages, Cambridge, MA: Harvard
             University Press.]. Two theories have been proposed to
             account for this fact. Constructivist/lexicalist theories
             [Tomasello, M. (2000). Do young children have adult
             syntactic competence? Cognition, 74(3), 209-253.] argue that
             word order is learned separately for each lexical item or
             construction. Generativist theories [Chomsky, N. (1995). The
             Minimalist Program. Cambridge, MA: MIT Press.], on the other
             hand, claim that word order is an abstract and general
             property, determined from the input independently of
             individual words. Here, we show that eight-month-old
             Japanese and Italian infants have opposite order preferences
             in an artificial grammar experiment, mirroring the opposite
             word orders of their respective native languages. This
             suggests that infants possess some representation of word
             order prelexically, arguing for the generativist view. We
             propose a frequency-based bootstrapping mechanism to account
             for our results, arguing that infants might build this
             representation by tracking the order of functors and content
             words, identified through their different frequency
             distributions. We investigate frequency and word order
             patterns in infant-directed Japanese and Italian corpora to
             support this claim.},
   Doi = {10.1016/j.cogpsych.2007.12.001},
   Key = {fds252850}
}

@article{fds304694,
   Author = {Jincho, N and Namiki, H and Mazuka, R},
   Title = {Effects of verbal working memory and cumulative linguistic
             knowledge on reading comprehension},
   Journal = {Japanese Psychological Research},
   Volume = {50},
   Number = {1},
   Pages = {12-23},
   Publisher = {WILEY},
   Year = {2008},
   Month = {March},
   ISSN = {0021-5368},
   url = {http://dx.doi.org/10.1111/j.1468-5884.2007.00358.x},
   Abstract = {In the present study, the effects of verbal working memory
             (VWM) and cumulative linguistic knowledge (CLK) on reading
             comprehension were investigated using an individual
             difference approach. We examined whether VWM and CLK are
             distinct verbal factors and whether each has independent
             influences on reading comprehension. VWM was tested using
             the Japanese Reading Span Test (RST). CLK was assessed using
             information, vocabulary, and similarity subtests of the
             Wechsler Adult Intelligence Scale-Revised (WAIS-R), as well
             as with the Hyakurakan kanji reading test. The differences
             between VWM and CLK were examined using correlation analyses
             between reading comprehension scores, and digit forward and
             backward span scores. The results showed that VWM and CLK
             were independent of each other, and that VWM and CLK
             independently contributed to reading comprehension. The
             obtained correlations also showed that CLK was independent
             of any type of short-term memory, and that the VWM measured
             using the RST had little correlation with digit span. ©
             Japanese Psychological Association 2008.},
   Doi = {10.1111/j.1468-5884.2007.00358.x},
   Key = {fds304694}
}

@article{fds252849,
   Author = {Mazuka, R},
   Title = {Infant speech perception and language acquisition (In
             Japanese; Nyuuji no onsei chikaku to gengo
             kakutoku)},
   Journal = {Life Science (In Japanese; Seitai No Kagaku)},
   Volume = {59},
   Number = {5},
   Pages = {448-449},
   Year = {2008},
   Key = {fds252849}
}

@article{fds252854,
   Author = {Jincho, N and Namiki, H and Mazuka, R},
   Title = {Effects of verbal working memory and cumulative linguistic
             knowledge on reading comprehension},
   Journal = {Japanese Psychological Research},
   Volume = {51},
   Number = {1},
   Pages = {12-23},
   Year = {2008},
   ISSN = {0021-5368},
   url = {http://dx.doi.org/10.1111/j.1468-5884.2007.00358.x},
   Abstract = {In the present study, the effects of verbal working memory
             (VWM) and cumulative linguistic knowledge (CLK) on reading
             comprehension were investigated using an individual
             difference approach. We examined whether VWM and CLK are
             distinct verbal factors and whether each has independent
             influences on reading comprehension. VWM was tested using
             the Japanese Reading Span Test (RST). CLK was assessed using
             information, vocabulary, and similarity subtests of the
             Wechsler Adult Intelligence Scale-Revised (WAIS-R), as well
             as with the Hyakurakan kanji reading test. The differences
             between VWM and CLK were examined using correlation analyses
             between reading comprehension scores, and digit forward and
             backward span scores. The results showed that VWM and CLK
             were independent of each other, and that VWM and CLK
             independently contributed to reading comprehension. The
             obtained correlations also showed that CLK was independent
             of any type of short-term memory, and that the VWM measured
             using the RST had little correlation with digit span. ©
             Japanese Psychological Association 2008.},
   Doi = {10.1111/j.1468-5884.2007.00358.x},
   Key = {fds252854}
}

@article{fds252853,
   Author = {Sato, Y and Sogabe, Y and Mazuka, R},
   Title = {Brain responses in the processing of lexical pitch-accent by
             Japanese speakers.},
   Journal = {Neuroreport},
   Volume = {18},
   Number = {18},
   Pages = {2001-2004},
   Year = {2007},
   Month = {December},
   ISSN = {0959-4965},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18007202},
   Abstract = {Near-infrared spectroscopy was used to elucidate the neural
             mechanisms underlying the processing of Japanese lexical
             pitch-accent by adult native speakers of Japanese. We
             measured cortical hemodynamic responses to a pitch pattern
             change (high-low vs. low-high) embedded in disyllabic words
             or pure tones. The results showed that the responses to the
             pitch pattern change within the words were larger than those
             for the pure tones in the left temporoparietal region.
             Activation in the left frontal region was also observed for
             the perception of pitch pattern change within the words.
             These results indicate that the left language-related
             regions contribute to the processing of lexical pitch-accent
             in native Japanese speakers.},
   Doi = {10.1097/wnr.0b013e3282f262de},
   Key = {fds252853}
}

@article{fds252826,
   Author = {Sato, Y and Mazuka, R and Sogabe, Y},
   Title = {A near-infrared spectroscopy (NIRS) study of lexical pitch
             accent processing in Japanese speakers},
   Journal = {Journal of Cerebral Blood Flow and Metabolism},
   Volume = {27},
   Number = {SUPPL. 1},
   Pages = {BP13-B06M},
   Year = {2007},
   Month = {November},
   ISSN = {0271-678X},
   Abstract = {Background and aims: The aim of this study is to investigate
             the neural mechanisms underlying the processing of Japanese
             lexical pitch accent. The left and right cerebral
             hemispheres work together but differently for auditory
             language processing in human adults. The left side activates
             dominantly for processing most of the linguistic aspects of
             speech, including phonemic, lexical and syntactic analyses
             (Purves, 2001; Zatorre et al., 1992). On the other hand, the
             right dominant or nondominant activations are observed
             during processing of prosodic cues such as intonational
             pitch characterized by changing fundamental frequencies
             (Imaizumi et al., 1998; Zatorre et al., 1992). Although
             prosody is usually associated with the melodic features of
             spoken language, lexical level prosody such as Japanese
             pitch accent can be used to distinguish word meanings. For
             example, a pair of homophones with two syllables is
             distinguished by the pitch accent pattern which is either
             high-low (HL) or low-high (LH) such as a'me (HL: "rain") vs.
             ame' (LH: "candy"). A question then arises with regard to
             how the lexical pitch accent is processed, especially in
             terms of the functional lateralization. Methods: We employed
             44-channel near-infrared spectroscopy (NIRS), which can
             noninvasively measure relative changes in the concentration
             of hemoglobin (Hb) in the localized brain tissue. The
             subjects were healthy right-handed Japanese speakers (n=18,
             nine male and nine female, 20-22 years). We used a'me and
             ame' tokens (14 pairs), ka'me (turtle) and kame' (ceramic
             jar) tokens (14 pairs), and bisyllabic Japanese words
             differing by pitch accent pattern (HL vs. LH) (14 pairs). In
             addition, pure tone stimuli were created by extracting
             fundamental frequencies from the a'me and ame' words. Four
             conditions were performed in a block design paradigm. In the
             ame condition, the baseline block (20 s or 25 s) contained
             only a'me or ame' which was repeated approximately every
             1.25 s. The test block (10 s) contained both a'me and ame'
             presented in a pseudo-random order with the equal
             probabilities. The kame condition was included to compare
             the phonemic change (/a/ to /ka/) to the lexical
             pitch-accent. In this condition, a'me or ame' was presented
             during the baseline, but ame and kame were presented in the
             test block. Similarly, in the variable words and the pure
             tone conditions, the baseline block contained either HL or
             LH pattern stimuli, the test block consisted of both pitch
             pattern stimuli. In order to assess cerebral lateralization,
             a laterality index, LI = (L - R) / (L + R), was calculated
             from the peaks of the Oxy-Hb responses in the left (L) and
             the right (R) temporal areas in each condition. Results and
             conclusions: The results showed that the LI for the pure
             tone condition was significantly smaller than those for
             other three conditions, indicating that the lexical pitch
             accent is processed with more leftward shift compared with
             the processing of non-linguistic pure tone stimuli. Our
             results suggest that unlike more global prosody, which has
             been reported to be processed by the right hemisphere,
             lexical pitch accent is processed "linguistically"
             by Japanese native speakers.},
   Key = {fds252826}
}

@article{fds252824,
   Author = {Imai, M and Mazuka, R},
   Title = {Language-relative construal of individuation constrained by
             universal ontology: revisiting language universals and
             linguistic relativity.},
   Journal = {Cognitive Science},
   Volume = {31},
   Number = {3},
   Pages = {385-413},
   Year = {2007},
   Month = {May},
   ISSN = {0364-0213},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/21635302},
   Abstract = {Objects and substances bear fundamentally different
             ontologies. In this article, we examine the relations
             between language, the ontological distinction with respect
             to individuation, and the world. Specifically, in
             cross-linguistic developmental studies that follow Imai and
             Gentner (1997), we examine the question of whether language
             influences our thought in different forms, like (1) whether
             the language-specific construal of entities found in a word
             extension context (Imai & Gentner, 1997) is also found in a
             nonlinguistic classification context; (2) whether the
             presence of labels per se, independent of the count-mass
             syntax, fosters ontology-based classification; (3) in what
             way, if at all, the count-mass syntax that accompanies a
             label changes English speakers' default construal of a given
             entity? On the basis of the results, we argue that the
             ontological distinction concerning individuation is
             universally shared and functions as a constraint on early
             learning of words. At the same time, language influences
             one's construal of entities cross-linguistically and
             developmentally, and causes a temporary change of construal
             within a single language. We provide a detailed discussion
             of how each of these three ways language may affect the
             construal of entities, and discuss how our universally
             possessed knowledge interacts with language both within a
             single language and in cross-linguistic context.},
   Doi = {10.1080/15326900701326436},
   Key = {fds252824}
}

@article{fds335698,
   Author = {Matsuda, Y and Ueno, KI and Waggoner, RA and Erickson, D and Shimura, Y and Tanaka, K and Cheng, K and Mazuka, R},
   Title = {Processing of infant-directed speech in parents: An fMRI
             study},
   Journal = {Neuroscience Research},
   Volume = {58},
   Pages = {S45-S45},
   Publisher = {Elsevier BV},
   Year = {2007},
   Month = {January},
   url = {http://dx.doi.org/10.1016/j.neures.2007.06.265},
   Doi = {10.1016/j.neures.2007.06.265},
   Key = {fds335698}
}

@article{fds252852,
   Author = {Mazuka, R},
   Title = {The rhythm-based prosodic bootstrapping hypothesis of early
             language acquisition: Does it work for learning for all
             languages?},
   Journal = {Journal of the Linguistic Society of Japan},
   Volume = {9},
   Number = {132},
   Pages = {1-13},
   Year = {2007},
   Key = {fds252852}
}

@article{fds252855,
   Author = {Imai, M and Mazuka, R},
   Title = {Revisiting language universals and linguistic relativity:
             language-relative construal of individuation constrained by
             universal ontology},
   Journal = {Cognitive Science},
   Volume = {31},
   Pages = {385-414},
   Year = {2007},
   Key = {fds252855}
}

@article{fds252856,
   Author = {Mazuka, R},
   Title = {"Nyuuji no onsee-chikaku-gakushuu ni okeru kobetsu gengo no
             eikyou -- Hayashi ronbun e no komento--" (In Japanese:
             Influence of individual languages for infants' speech
             perception development. -- response to Hayashi paper
             --)},
   Journal = {Japanese Psychological Review},
   Volume = {49},
   Number = {1},
   Pages = {75-77},
   Year = {2006},
   Key = {fds252856}
}

@article{fds252857,
   Author = {Mazuka, R},
   Title = {"Gengo-nai no kobetsu reberu tokusei to gengo kakutoku no
             mekanizumu" (In Japanese: The role of language specific
             characteristics for the mechanisms for language
             acquisition)},
   Journal = {Baby Science},
   Volume = {5},
   Pages = {37-38},
   Year = {2006},
   Key = {fds252857}
}

@article{fds252825,
   Author = {Choi, Y and Mazuka, R},
   Title = {Young children's use of prosody in sentence
             parsing.},
   Journal = {Journal of Psycholinguistic Research},
   Volume = {32},
   Number = {2},
   Pages = {197-217},
   Year = {2003},
   Month = {March},
   ISSN = {0090-6905},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/12690831},
   Abstract = {Korean children's ability to use prosodic phrasing in
             sentence comprehension was studied using two types of
             ambiguity. First, we examined a word-segmentation ambiguity
             in which placement of the phrasal boundary leads to
             different interpretations of a sentence. Next, we examined a
             syntactic ambiguity in which the same words were differently
             grouped into syntactic phrases by prosodic demarcation.
             Children aged 3 or 4 years showed that they could use
             prosodic information to segment utterances and to derive the
             meaning of ambiguous sentences when the sentences only
             contained a word-segmentation ambiguity. However, even 5- to
             6-year-old children were not able to reliably resolve the
             second type of ambiguity, an ambiguity of phrasal grouping,
             by using prosodic information. The results demonstrate that
             children's difficulties in dealing with structural ambiguity
             are not due to their inability to use prosodic
             information.},
   Doi = {10.1023/a:1022400424874},
   Key = {fds252825}
}

@article{fds252860,
   Author = {Mazuka, R and Friedman, RS},
   Title = {Linguistic relativity in Japanese and English: Is language
             the primary determinant in object classification},
   Journal = {Journal of East Asian Linguistics},
   Volume = {9},
   Number = {4},
   Pages = {353-377},
   Year = {2000},
   ISSN = {0925-8558},
   Abstract = {In the present study, we tested claims by Lucy (1992a,
             1992b) that differences between the number marking systems
             used by Yucatec Maya and English lead speakers of these
             languages to differentially attend to either the material
             composition or the shape of objects. In order to evaluate
             Lucy's hypothesis, we replicated his critical object
             classification experiment using speakers of English and
             Japanese, a language with a number marking system very
             similar to that employed by Yucatec Maya. Our results failed
             to replicate Lucy's findings. Both Japanese and English
             speakers, who were comparable in their cultural and
             educational backgrounds, classified objects more on the
             basis of shape than material composition, suggesting that
             Lucy's original findings may have resulted not from
             differences between the number marking systems of Yucatec
             Maya and English but rather from differences in the cultural
             and educational backgrounds of his experimental groups.
             Alternative accounts of the cognitive consequences of
             inter-linguistic differences in number marking systems are
             discussed. © 2000 Kluwer Academic Publishers.},
   Key = {fds252860}
}

@article{fds335699,
   Author = {Yoshioka, K and Hayashi, A and Deguchi, T and Mazuka,
             R},
   Title = {Four to ten month-old infants' sensitivity to the rhythmic
             pattern of {Japanese} baby-words},
   Journal = {日本音響学会研究発表会講演論文集},
   Volume = {1998},
   Number = {1},
   Pages = {377--378},
   Year = {1998},
   Month = mar,
   Key = {fds335699}
}

@article{fds304692,
   Author = {Mazuka, R and Itoh, K and Kondo, T},
   Title = {Processing down the garden path in {Japanese}: processing of
             sentences with lexical homonyms.},
   Journal = {Journal of Psycholinguistic Research},
   Volume = {26},
   Number = {2},
   Pages = {207--228},
   Year = {1997},
   Month = mar,
   ISSN = {0090-6905},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/9110433},
   Abstract = {This paper investigates whether or not Japanese sentences
             with lexical homonyms cause measurable processing
             difficulties for Japanese speakers. Pairs of sentences
             involving lexical homonyms were tested with three types of
             questionnaires (who-did-what questions, difficulty ratings,
             and misleadingness ratings) and two experimental tests (an
             eye-movement monitoring experiment and a self-paced reading
             experiment). In both the difficulty rating and the
             misleadingness rating questionnaires, "late boundary"
             sentences, in which a phrase boundary followed a homonymous
             phrase, were rated as significantly more difficult and more
             misleading than "early boundary" sentences, where the
             boundary preceded the homonymous phrase. The results from
             the eye-movement study and the self-paced reading study
             showed that the late boundary difficulties were associated
             with the processing of the regions that followed the
             homonymous phrases. These results confirmed our prediction
             that the difficulty of late boundary sentences is likely to
             be caused by a subject's original misanalysis and subsequent
             revision. The results are discussed in terms of possible
             reasons why the early boundary version was preferred in
             these sentences.},
   Doi = {10.1023/a:1025013716381},
   Key = {fds304692}
}

@article{fds304693,
   Author = {Misono, Y and Mazuka, R and Kondo, T and Kiritani,
             S},
   Title = {Effects and limitations of prosodic and semantic biases on
             syntactic disambiguation.},
   Journal = {Journal of Psycholinguistic Research},
   Volume = {26},
   Number = {2},
   Pages = {229--245},
   Year = {1997},
   Month = mar,
   ISSN = {0090-6905},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/9110434},
   Abstract = {This paper examined the effects of prosody on the syntactic
             ambiguity resolution of Japanese sentences, especially with
             reference to the interaction with semantic bias.
             Syntactically ambiguous sentences with different types of
             semantic bias were constructed. The degree of bias in each
             sentence was evaluated through visual presentation
             experiments. Three types of sentences were selected based on
             the results of visual presentation experiments, were
             recorded with prosody maximally favoring each possible
             interpretation of the sentences, and were used as the
             stimuli for the auditory presentation experiments. The
             results showed that prosodic cues can influence the
             interpretation of a sentence even when the sentence is
             strongly semantically biased. The results also showed a
             limitation to prosodic cues. The prosodic biases alone were
             not sufficient to fully determine the interpretation of the
             sentences even when the sentences were neutrally biased
             semantically.},
   Doi = {10.1023/a:1025065700451},
   Key = {fds304693}
}

@article{fds252858,
   Author = {Misono, Y and Mazuka, R and Kondo, T and Kiritani,
             S},
   Title = {Effects and limitations of prosodic and semantic biases on
             syntactic ambiguity resolution of {Japanese}
             sentences},
   Journal = {Journal of Psycholinguistic Research},
   Volume = {26},
   Number = {2},
   Pages = {229--245},
   Year = {1997},
   ISSN = {0090-6905},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/9110434},
   Abstract = {This paper examined the effects of prosody on the syntactic
             ambiguity resolution of Japanese sentences, especially with
             reference to the interaction with semantic bias.
             Syntactically ambiguous sentences with different types of
             semantic bias were constructed. The degree of bias in each
             sentence was evaluated through visual presentation
             experiments. Three types of sentences were selected based on
             the results of visual presentation experiments, were
             recorded with prosody maximally favoring each possible
             interpretation of the sentences, and were used as the
             stimuli for the auditory presentation experiments. The
             results showed that prosodic cues can influence the
             interpretation of a sentence even when the sentence is
             strongly semantically biased. The results also showed a
             limitation to prosodic cues. The prosodic biases alone were
             not sufficient to fully determine the interpretation of the
             sentences even when the sentences were neutrally biased
             semantically.},
   internal-note = {NOTE(review): appears to duplicate fds304693 (same
             authors, journal, volume, pages, abstract) — verify and
             merge under one key},
   Key = {fds252858}
}

@article{fds252859,
   Author = {Mazuka, R and Itoh, K and Kondo, T},
   Title = {Processing down the {Japanese} garden-path
             sentences},
   Journal = {Journal of Psycholinguistic Research},
   Volume = {26},
   Number = {2},
   Pages = {207--228},
   Year = {1997},
   ISSN = {0090-6905},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/9110433},
   Abstract = {This paper investigates whether or not Japanese sentences
             with lexical homonyms cause measurable processing
             difficulties for Japanese speakers. Pairs of sentences
             involving lexical homonyms were tested with three types of
             questionnaires (who-did-what questions, difficulty ratings,
             and misleadingness ratings) and two experimental tests (an
             eye-movement monitoring experiment and a self-paced reading
             experiment). In both the difficulty rating and the
             misleadingness rating questionnaires, "late boundary"
             sentences, in which a phrase boundary followed a homonymous
             phrase, were rated as significantly more difficult and more
             misleading than "early boundary" sentences, where the
             boundary preceded the homonymous phrase. The results from
             the eye-movement study and the self-paced reading study
             showed that the late boundary difficulties were associated
             with the processing of the regions that followed the
             homonymous phrases. These results confirmed our prediction
             that the difficulty of late boundary sentences is likely to
             be caused by a subject's original misanalysis and subsequent
             revision. The results are discussed in terms of possible
             reasons why the early boundary version was preferred in
             these sentences.},
   internal-note = {NOTE(review): appears to duplicate fds304692 (same
             authors, journal, volume, pages, abstract) — verify and
             merge under one key},
   Key = {fds252859}
}

@article{fds252822,
   Author = {Yokoyama, H and Niwa, S and Itoh, K and Mazuka, R},
   Title = {Fractal property of eye movements in schizophrenia.},
   Journal = {Biological Cybernetics},
   Volume = {75},
   Number = {2},
   Pages = {137--140},
   Year = {1996},
   Month = aug,
   ISSN = {0340-1200},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/8855352},
   Abstract = {On the basis of a temporal model of animal behavior we
             conducted temporal analysis of eye movements in
             schizophrenic subjects (n = 10) and normal controls (n =
             10). We found a fractal property in schizophrenic subjects,
             the fixation time of eye movement during reading ambiguous
             and difficult sentences showing a clear inverse power law
             distribution. An exponential distribution of a nonfractal
             nature was found in normal controls.},
   Doi = {10.1007/s004220050281},
   Key = {fds252822}
}

@article{fds252820,
   Author = {Kondo, T and Mazuka, R},
   Title = {Prosodic planning while reading aloud: on-line examination
             of {Japanese} sentences.},
   Journal = {Journal of Psycholinguistic Research},
   Volume = {25},
   Number = {2},
   Pages = {357--381},
   Year = {1996},
   Month = mar,
   ISSN = {0090-6905},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/8667303},
   Abstract = {In this paper, we discuss the process of generating prosody
             on-line while reading a sentence orally. We report results
             from two studies in which eye-voice span was measured while
             subjects read aloud. In study one, the average eye-voice
             span for simple texts was only about 2.5 characters. In
             study two, the eye-voice span was also about 2.5 characters
             even when the subjects read garden-path sentences which
             required reanalysis during processing. That the readers
             looked only a few characters ahead before reading aloud
             suggests that the prosody which they generate is not based
             on a global syntactic analysis, but instead reflects only
             limited, local syntactic information. The subjects,
             therefore, make errors and repairs when this locally
             determined prosody obviously contradicts the meaning of the
             sentence.},
   Doi = {10.1007/bf01708578},
   Key = {fds252820}
}

@article{fds302962,
   Author = {Lust, B and Eisele, J and Mazuka, R},
   Title = {The {Binding Theory} Module: Evidence from First Language
             Acquisition for {Principle C}},
   Journal = {Language},
   Volume = {68},
   Number = {2},
   Pages = {333},
   Year = {1992},
   Month = jun,
   ISSN = {0097-8507},
   Publisher = {JSTOR},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1992JE25800004&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.2307/416944},
   Key = {fds302962}
}

@article{fds252819,
   Author = {Mazuka, R},
   Title = {Processing of empty categories in {Japanese}},
   Journal = {Journal of Psycholinguistic Research},
   Volume = {20},
   Number = {3},
   Pages = {215--232},
   Publisher = {Springer Nature},
   Year = {1991},
   Month = may,
   ISSN = {0090-6905},
   url = {http://dx.doi.org/10.1007/BF01067216},
   Abstract = {Recent experimental research on the processing of empty
             categories (EC) in English points to the general conclusion
             that during on-line processing of a sentence, not only is
             the presence of an EC detected but its linguistically
             legitimate antecedents are also computed. In this paper, it
             is argued that, in Japanese, ECs pose serious problems for
             on-line processing if they are to be processed in a manner
             similar to English. Initial experimental data indicates
             that, in Japanese, the processor may not recognize an EC
             during initial on-line processing of a sentence. It is
             tentatively suggested that processing of an EC in Japanese
             may be delayed until after the on-line processing of the
             structure of a sentence. © 1991 Plenum Publishing
             Corporation.},
   Doi = {10.1007/BF01067216},
   Key = {fds252819}
}

@article{fds252818,
   Author = {Lust, B and Mazuka, R},
   Title = {Cross-linguistic studies of directionality in first language
             acquisition: the {Japanese} data--a response to {O'Grady},
             {Suzuki-Wei} \& {Cho} 1986.},
   Journal = {Journal of Child Language},
   Volume = {16},
   Number = {3},
   Pages = {665--684},
   Year = {1989},
   Month = oct,
   ISSN = {0305-0009},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/2808580},
   Abstract = {Elsewhere we have argued on the basis of cross linguistic
             studies of directionality effects on anaphora in child
             language, that there is no universal 'forward directionality
             preference (FDP)'; rather such a preference is linked to
             languages with specific grammatical properties. Although
             such a preference has been attested in English acquisition,
             matched experimental designs in Japanese, Chinese and
             Sinhalese, for example, do not show this effect. In this
             paper we argue that current attempts to show that forward
             directionality effects can also be induced in Japanese
             acquisition do not succeed in supporting the FDP. Specifics
             of the design of stimulus sentences in these experiments
             vary previous cross-linguistic designs so as to favour
             forward directionality on independent grounds, and confound
             cross linguistic comparisons. They in fact support a
             universal structure dependence in children's hypotheses
             about directionality of anaphora.},
   Key = {fds252818}
}

@article{fds302961,
   Author = {Lust, B and Mazuka, R},
   Title = {Cross-linguistic studies of directionality in 1st language
             acquisition - the {Japanese} data - a response},
   Journal = {Journal of Child Language},
   Volume = {16},
   Number = {3},
   Pages = {665--684},
   Year = {1989},
   Month = oct,
   ISSN = {0305-0009},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1989CA03600012&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.1017/S0305000900010783},
   internal-note = {NOTE(review): Web of Science duplicate of fds252818
             (same journal, volume, pages) — verify and merge under one
             key},
   Key = {fds302961}
}


%% Books   
@book{fds6112,
   Author = {Mazuka, R.},
   Title = {The Development of Language Processing Strategies: A
             cross-linguistic study between {Japanese} and
             {English}},
   Publisher = {Lawrence Erlbaum Associates},
   Year = {1998},
   Key = {fds6112}
}

@book{fds38672,
   Author = {Mazuka, R. and Nagai, N.},
   Title = {{Japanese} Sentence Processing},
   Publisher = {Lawrence Erlbaum},
   Year = {1995},
   Key = {fds38672}
}


%% Chapters in Books   
@misc{fds39782,
   Author = {Mazuka, R. and Itoh, K. and Kondo, T.},
   Title = {Cost of scrambling in {Japanese} sentence processing},
   Booktitle = {Papers from International East Asian Psycholinguistics
             Workshop},
   Publisher = {CSLI},
   Address = {Stanford, California},
   Editor = {Nakayama, M.},
   Year = {2002},
   Key = {fds39782}
}

@misc{fds39783,
   Author = {Mazuka, R.},
   Title = {Can a grammatical parameter be set before the first word?
             Prosodic contributions to early setting of a grammatical
             parameter},
   Booktitle = {Signal to Syntax: Bootstrapping from Speech to Grammar in
             Early Acquisition},
   Publisher = {Lawrence Erlbaum},
   Editor = {Morgan, J. and Demuth, K.},
   Year = {1996},
   Key = {fds39783}
}


%% Chapters in Books   
@misc{fds185558,
   Author = {Jincho, N. and Mazuka, R},
   Title = {Individual differences in sentence processing: Effects of
             verbal working memory and cumulative linguistic
             knowledge},
   Volume = {38},
   Pages = {49--65},
   Booktitle = {Processing and producing head-final structures, Studies in
             Theoretical Psycholinguistics, 38},
   Publisher = {Springer},
   Editor = {Yamashita, H. and Hirose, Y. and Packard, J. L.},
   Year = {2011},
   Key = {fds185558}
}

@misc{fds169347,
   Author = {Choi, Y.-O. and Mazuka, R.},
   Title = {Acquisition of prosody in {Korean}},
   Volume = {III},
   Pages = {255--268},
   Booktitle = {Handbook of East Asian Psycholinguistics, Volume III,
             Korean},
   Publisher = {Cambridge University Press},
   Editor = {Lee, C.-M. and Simpson, G. and Kim, Y. J.},
   Year = {2009},
   ISBN = {978-0-521-83335-6},
   Key = {fds169347}
}

@misc{fds252796,
   Author = {Mazuka, R and Kondo, T and Hayashi, A},
   Title = {{Japanese} mothers' use of specialized vocabulary in
             infant-directed speech: Infant-directed vocabulary in
             {Japanese}},
   Pages = {39--58},
   Booktitle = {The Origins of Language: Unraveling Evolutionary
             Forces},
   Publisher = {Springer Japan},
   Editor = {Masataka, N.},
   Year = {2008},
   Month = jan,
   ISBN = {9784431791010},
   url = {http://dx.doi.org/10.1007/978-4-431-79102-7_4},
   Abstract = {© Springer 2008. All rights reserved. When adults talk to
             infants or young children, they modify their speech. The
             specialized speech is sometimes called motherese or
             infant-directed speech (IDS). Many characteristics of IDS
             have been documented across many languages, but the best
             known characteristics of IDS have to do with prosody of the
             speech, such as higher pitch and exaggerated pitch contours,
             and longer more frequent pauses (c.f., Fernald and Simon
             1984; Fernald and Kuhl 1987; Fernald and Mazzie 1991; Snow
             and Ferguson 1977). Other types of modifications also
             occur, such as changes in syntactic properties, e.g.,
             shorter and simpler utterances, and semantic contents, e.g.,
             conversation about here and now (c.f., Newport et al. 1977).
             It has often been argued that many of the IDS properties are
             universal (Fernald 1993; Fisher and Tokura 1996; Grieser and
             Kuhl 1988; Kuhl and et al. 1997; Trainer and et al. 2000),
             but there are significant cross-linguistic variations in
             the way mothers interact with their infants (e.g., Fernald
             and Morikawa 1993), and the way adults modify their speech
             in IDS (Fernald et al. 1989).},
   Doi = {10.1007/978-4-431-79102-7_4},
   Key = {fds252796}
}

@misc{fds169352,
   Author = {Mazuka, R.},
   Title = {Age of acquisition and critical period in language
             acquisition (In {Japanese}; Gengo kakutoku ni okeru nenrei
             kooka ha rinkaiki ka)},
   Pages = {39--58},
   Booktitle = {Brain Science and Communication (Gengo to shiko o umu
             no)},
   Publisher = {University of Tokyo Press},
   Editor = {Iriki, A.},
   Year = {2008},
   Key = {fds169352}
}

@misc{fds53296,
   Author = {Kondo, T. and Mazuka, R. and Kakehi, K.},
   Title = {Role of lexical properties in {Japanese} sentence
             processing},
   Pages = {226--232},
   Booktitle = {Handbook of East-Asian Psycholinguistics: Volume II,
             Japanese},
   Publisher = {Cambridge University Press},
   Editor = {Nakayama, M. and Mazuka, R. and Shirai, Y.},
   Year = {2006},
   Key = {fds53296}
}

@misc{fds53297,
   Author = {Nakayama, M. and Shirai, Y. and Mazuka, R.},
   Title = {Introduction},
   Pages = {1--10},
   Booktitle = {Handbook of East-Asian Psycholinguistics: Volume II,
             Japanese},
   Publisher = {Cambridge University Press},
   Editor = {Nakayama, M. and Mazuka, R. and Shirai, Y.},
   Year = {2006},
   Key = {fds53297}
}


%% Papers Published   
@incollection{fds6114,
   Author = {Choi, Y. and Mazuka, R. and Akahane-Yamada, R.},
   Title = {{Korean} and {Japanese} children's production of {English} /l/
             and /r/},
   Booktitle = {Papers from the Workshop on Acquisition of East Asian
             Languages},
   Publisher = {Kuroshio Publisher},
   Address = {Tokyo, Japan},
   Editor = {Nakayama, M.},
   Year = {2001},
   Key = {fds6114}
}


%% Commentaries/Book Reviews   
@article{fds141551,
   Author = {Sato, Y. and Mazuka, R.},
   Title = {Relation between prenatal learning and post-natal language
             development: Comments on Morokuma, S., Fukushima, K.,
             Nakano, H., and Wake, N. "Evaluating central nervous system
             fetus' behavior" (In {Japanese})},
   Journal = {Baby Science},
   Number = {7},
   Pages = {16--17},
   Year = {2008},
   Key = {fds141551}
}


%% Edited Volumes   
@misc{fds201625,
   Author = {Arita, S. and Goto Butler, Y. and Hauser, E. and Horie, K. and
             Mazuka, R. and Shirai, Y. and Tsubakita, J.},
   Title = {Papers from the Tenth Annual Conference of the {Japanese}
             Society for Language Sciences: Studies in Language Sciences
             10},
   Publisher = {Kuroshio Publishers},
   Address = {Tokyo, Japan},
   Year = {2011},
   Key = {fds201625}
}

@misc{fds53295,
   Author = {Nakayama, M. and Mazuka, R. and Shirai, Y.},
   Title = {Handbook of {East-Asian} Psycholinguistics: Volume 2
             {Japanese}},
   Publisher = {Cambridge University Press},
   Year = {2006},
   Key = {fds53295}
}


%% Other   
@misc{fds201718,
   Author = {Arai, M. and Nakamura, C. and Mazuka, R.},
   Title = {An anticipatory effect of syntactic priming in processing of
             structurally ambiguous sentences},
   Journal = {2011 IEICE Technical Report},
   Pages = {83--86},
   Year = {2011},
   Key = {fds201718}
}

@misc{fds201617,
   Author = {Utsugi, A. and Koizumi, M. and Mazuka, R.},
   Title = {Subtle differences between the speech of young speakers of
             `Accentless' and Standard {Japanese} dialects: An analysis of
             pitch peak alignment},
   Journal = {Proceedings of the 17th International Congress of Phonetic
             Sciences},
   Pages = {2046--2049},
   Year = {2011},
   Key = {fds201617}
}

@misc{fds201619,
   Author = {Jincho, N. and Oishi, H. and Mazuka, R.},
   Title = {Effects of vision and language on attention during sentence
             comprehension -- A visual world study},
   Journal = {IEICE Technical Report},
   Volume = {TL2011-16},
   Number = {2011-8},
   Pages = {49--52},
   Year = {2011},
   Key = {fds201619}
}

@misc{fds185587,
   Author = {Mazuka, R.},
   Title = {Learning the melody of a language: Investigation into
             language acquisition through the prosody of
             {Japanese}},
   Journal = {Proceedings of 2010 IEICE General Conference},
   Pages = {SS35--38},
   Year = {2010},
   Key = {fds185587}
}

@misc{fds185589,
   Author = {Arai, M. and Mazuka, R.},
   Title = {Linking syntactic priming to language development: a visual
             world eye-tracking study},
   Journal = {Technical Report of The Institute of Electronics,
             Information and Communication Engineers},
   Volume = {110-163},
   Number = {TL2010-18},
   Pages = {43--48},
   Year = {2010},
   Key = {fds185589}
}

@misc{fds185588,
   Author = {Oishi, H. and Jincho, N. and Mazuka, R.},
   Title = {The involvement of inhibition function during garden-path
             recovery in sentence processing},
   Journal = {Technical Report of The Institute of Electronics,
             Information and Communication Engineers},
   Volume = {110-163},
   Number = {TL2010-18},
   Year = {2010},
   Key = {fds185588}
}

@misc{fds185592,
   Author = {Hayashi, A. and Mazuka, R.},
   Title = {Infants' speech perception between 5- and
             13-months},
   Journal = {Proceedings of Technical Committee of Psychological and
             Physiological Acoustics, Acoustical Society of
             Japan},
   Pages = {1--6},
   Year = {2010},
   internal-note = {NOTE(review): original had "Mazuka, A." — corrected
             to "Mazuka, R." as this is Reiko Mazuka's publication list;
             verify against the original proceedings},
   Key = {fds185592}
}

@misc{fds169350,
   Author = {Miyazawa, K. and Kikuchi, H. and Shinya, T. and Mazuka,
             R.},
   Title = {The dynamic structure of vowels in infant-directed speech.
             --Riken Japanese Mother-Infant Conversation Corpus-- (In
             {Japanese}; Tainyujihatsuwa no boin no jikan kozo, Riken
             Nihongo boshikaiwa kopasu o mochita bunseki.},
   Journal = {The Institute of Electronics, Information and communication
             engineers (IEICE), Technical Report},
   Volume = {SP2009},
   Number = {73},
   Pages = {67--72},
   Year = {2009},
   Key = {fds169350}
}

@misc{fds169348,
   Author = {Mazuka, R.},
   Title = {Role of linguistic rhythm for language acquisition. (In
             {Japanese}; Gengokakutoku no kiban wo nasu rhythm
             ninchi)},
   Journal = {Gekkan Gengo (Japanese monthly magazine, "Language")},
   Volume = {38},
   Number = {6},
   Pages = {58--65},
   Publisher = {Taishukan Publishing Company (Tokyo)},
   Year = {2009},
   Key = {fds169348}
}

@misc{fds169353,
   Author = {Kitahara, M. and Nishikawa, K. and Igarashi, Y. and Shinya, T. and Mazuka, R.},
   Title = {Characteristics of pitch accents in infant-directed speech
             -An analysis of Riken Japanese Mother-Infant Conversation
             Corpus (In {Japanese}; Tai nyuuji hatsuwa ni okeru pitchi
             akusento no seishitsu ni tsuite; riken nihongo boshi kaiwa
             koopasu o tsukatta bunseki},
   Journal = {The Institute of Electronics, Information and Communication
             Engineers Technical Report},
   Volume = {NLC2008},
   Number = {46},
   Pages = {133--136},
   Year = {2008},
   Key = {fds169353}
}

@misc{fds141550,
   Author = {Igarashi, Y. and Mazuka, R.},
   Title = {Exaggerated Prosody in Infant-directed Speech?: Intonational
             Phonological Analysis of {Japanese} Infant-Directed
             Speech},
   Journal = {Proceedings for Boston University Conference for Language
             Development},
   Volume = {32},
   Year = {2008},
   Key = {fds141550}
}

@misc{fds169354,
   Author = {Tajima, K. and Tanaka, K. and Mazuka, R.},
   Title = {Does {Japanese} motherese help children acquire {Japanese}
             rhythm? -- Distributional analysis of moraic phonemes in
             infant-directed speech -- (In {Japanese}; hahaoya tokuyuu no
             hanashi kata wa nihongo rizumu no kakutoku ni yakudatsuka?
             --tainyuuji onsei ni okeru tokushuhaku onso no bunseki
             kara},
   Journal = {The Institute of Electronics, Information and Communication
             Engineers Technical Report},
   Volume = {SP2008},
   Number = {37},
   Pages = {99--104},
   Year = {2008},
   Key = {fds169354}
}

@misc{fds169355,
   Author = {Kondo, T. and Jincho, N. and Mazuka, R. and Hayashi,
             A.},
   Title = {Influences of phonological length and prosody in silent
             reading (In {Japanese}; Yomi no katei ni okeru onincho oyobi
             inritu no eikyo)},
   Journal = {The Institute of Electronics, Information communication
             engineers (IEICE) Technical Report},
   Volume = {TL2007},
   Number = {8},
   Pages = {41--46},
   Year = {2007},
   Key = {fds169355}
}

@misc{fds53294,
   Author = {Igarashi, Y. and Mazuka, R.},
   Title = {"Hahaoya tokuyuu no hanashikata (Motherese) wa otona no
             Nihongo to doochigau ka -- RIKEN Nihongoboshikaiwa
             koopasu"(In {Japanese}: How do mothers speak differently to
             infants? -- RIKEN Mother-Infant Conversation Corpus
             --)},
   Journal = {The Institute of Electronics, Information and Communication
             Engineers Technical Report},
   Volume = {2006},
   Pages = {31--35},
   Year = {2006},
   Key = {fds53294}
}

@misc{fds53240,
   Author = {Mazuka, R. and Igarashi, Y. and Nishikawa, K.},
   Title = {Input for learning {Japanese}: RIKEN Japanese Mother-infant
             Conversation Corpus},
   Journal = {IEICE Technical Report},
   Volume = {TL-2006-16},
   Pages = {11--15},
   Year = {2006},
   Key = {fds53240}
}


Duke University * Arts & Sciences * Linguistics * Faculty * Librarian * Staff * Reload * Login