Psychology and Neuroscience Faculty Database
Psychology and Neuroscience
Arts & Sciences
Duke University

 HOME > Arts & Sciences > pn > Faculty    Search Help Login pdf version printable version 

Publications of Stephen Mitroff    :chronological  alphabetical  combined listing:

%% Journal Articles   
@article{fds332884,
   Author = {Mitroff, SR and Ericson, JM and Sharpe, B},
   Title = {Predicting Airport Screening Officers' Visual Search
              Competency With a Rapid Assessment},
   Journal = {Human Factors},
   Volume = {60},
   Number = {2},
   Pages = {201--211},
   Year = {2018},
   Month = mar,
   url = {http://dx.doi.org/10.1177/0018720817743886},
   Abstract = {{\copyright} 2017, {\copyright} 2017, Human Factors and
              Ergonomics Society. Objective: The study's objective was to
              assess a new personnel selection and assessment tool for
              aviation security screeners. A mobile app was modified to
              create a tool, and the question was whether it could predict
              professional screeners' on-job performance. Background: A
              variety of professions (airport security, radiology, the
              military, etc.) rely on visual search performance---being
              able to detect targets. Given the importance of such
              professions, it is necessary to maximize performance, and
              one means to do so is to select individuals who excel at
              visual search. A critical question is whether it is possible
              to predict search competency within a professional search
              environment. Method: Professional searchers from the USA
              Transportation Security Administration (TSA) completed a
              rapid assessment on a tablet-based X-ray simulator (XRAY
              Screener, derived from the mobile technology app Airport
              Scanner; Kedlin Company). The assessment contained 72 trials
              that were simulated X-ray images of bags. Participants
              searched for prohibited items and tapped on them with their
              finger. Results: Performance on the assessment significantly
              related to on-job performance measures for the TSA officers
              such that those who were better XRAY Screener performers
              were both more accurate and faster at the actual airport
              checkpoint. Conclusion: XRAY Screener successfully predicted
              on-job performance for professional aviation security
              officers. While questions remain about the underlying
              cognitive mechanisms, this quick assessment was found to
              significantly predict on-job success for a task that relies
              on visual search performance. Application: It may be
              possible to quickly assess an individual's visual search
              competency, which could help organizations select new hires
              and assess their current workforce.},
   Doi = {10.1177/0018720817743886},
   Key = {fds332884}
}

@article{fds329322,
   Author = {Mitroff, SR and Sharpe, B},
   Title = {Using big data to solve real problems through academic and
              industry partnerships},
   Journal = {Current Opinion in Behavioral Sciences},
   Volume = {18},
   Pages = {91--96},
   Year = {2017},
   Month = dec,
   url = {http://dx.doi.org/10.1016/j.cobeha.2017.09.013},
   Abstract = {{\copyright} 2017 Elsevier Ltd Big data has revolutionized a number of
              industries as it provides a powerful tool for asking and
              answering questions in novel ways. Academic researchers can
              join this trend and use immense and complex datasets to
              explore previously intractable questions. Yet, accessing and
              analyzing big data can be difficult. The goal of this
              chapter is to outline various benefits and challenges of
              using big data for academic purposes, and to provide
              thoughts on how to succeed. The primary suggestion is for
              academics to collaborate with appropriate industry partners
              to simultaneously achieve both theoretical and practical
              advances.},
   Doi = {10.1016/j.cobeha.2017.09.013},
   Key = {fds329322}
}

@article{fds327186,
   Author = {Biggs, AT and Clark, K and Mitroff, SR},
   Title = {Who should be searching? Differences in personality can
              affect visual search accuracy},
   Journal = {Personality and Individual Differences},
   Volume = {116},
   Pages = {353--358},
   Year = {2017},
   Month = oct,
   url = {http://dx.doi.org/10.1016/j.paid.2017.04.045},
   Doi = {10.1016/j.paid.2017.04.045},
   Key = {fds327186}
}

@article{fds331411,
   Author = {Ericson, JM and Kravitz, DJ and Mitroff, SR},
   Title = {Visual Search: You Are Who You Are (+ A Learning
              Curve)},
   Journal = {Perception},
   Pages = {030100661772109-030100661772109},
   internal-note = {Pages value appears corrupted -- it looks like a
              duplicated article/DOI identifier rather than a page range;
              verify against the publisher record},
   Year = {2017},
   Month = jul,
   url = {http://dx.doi.org/10.1177/0301006617721091},
   Doi = {10.1177/0301006617721091},
   Key = {fds331411}
}

@article{fds323691,
   Author = {Chang, BP and Cain, D and Mitroff, SR},
   Title = {Emergency department crowding associated with differences in
              CXR interpretations between emergency physicians and
              radiologists.},
   Journal = {American Journal of Emergency Medicine},
   Volume = {35},
   Number = {5},
   Pages = {793--794},
   Year = {2017},
   Month = may,
   url = {http://dx.doi.org/10.1016/j.ajem.2016.12.067},
   Doi = {10.1016/j.ajem.2016.12.067},
   Key = {fds323691}
}

@article{fds323486,
   Author = {Adamo, SH and Cain, MS and Mitroff, SR},
   Title = {An individual differences approach to multiple-target visual
              search errors: How search errors relate to different
              characteristics of attention},
   Journal = {Vision Research},
   Year = {2016},
   Month = dec,
   url = {http://dx.doi.org/10.1016/j.visres.2016.10.010},
   Doi = {10.1016/j.visres.2016.10.010},
   Key = {fds323486}
}

@article{fds323254,
   Author = {Devyatko, D and Appelbaum, LG and Mitroff, SR},
   Title = {A Common Mechanism for Perceptual Reversals in
              Motion-Induced Blindness, the Troxler Effect, and Perceptual
              Filling-In.},
   Journal = {Perception},
   Year = {2016},
   Month = oct,
   url = {http://dx.doi.org/10.1177/0301006616672577},
   Abstract = {Several striking visual phenomena involve a physically
              present stimulus that alternates between being perceived and
              being ``invisible.'' For example, motion-induced blindness,
              the Troxler effect, and perceptual filling-in all consist of
              subjective alternations where an item repeatedly changes
              from being seen to unseen. In the present study, we explored
              whether these three specific visual phenomena share any
              commonalities in their alternation rates and patterns to
              better understand the mechanisms of each. Data from 69
              individuals revealed moderate to strong correlations across
              the three phenomena for the number of perceptual
              disappearances and the accumulated duration of the
              disappearances. Importantly, these effects were not
              correlated with eye movement patterns (saccades) assessed
              through eye tracking, differences in motion sensitivity as
              indexed by dot coherence and speed perception thresholds, or
              simple reaction time abilities. Principal component analyses
              revealed a single component that explained 67\% of the
              variance for the number of perceptual reversals and 60\% for
              the accumulated duration of the disappearances. The temporal
              dynamics of illusory disappearances was also compared for
              each phenomenon, and normalized durations of disappearances
              were well fit by a gamma distribution with similar shape
              parameters for each phenomenon, suggesting that they may be
              driven by a single oscillatory mechanism.},
   Doi = {10.1177/0301006616672577},
   Key = {fds323254}
}

@article{fds322022,
   Author = {Dowd, EW and Mitroff, SR and LaBar, KS},
   Title = {Fear generalization gradients in visuospatial
              attention.},
   Journal = {Emotion},
   Volume = {16},
   Number = {7},
   Pages = {1011--1018},
   Year = {2016},
   Month = oct,
   url = {http://dx.doi.org/10.1037/emo0000197},
   Abstract = {Fear learning can be adaptively advantageous, but only if
              the learning is integrated with higher-order cognitive
              processes that impact goal-directed behaviors. Recent work
              has demonstrated generalization (i.e., transfer) of
              conditioned fear across perceptual dimensions and conceptual
              categories, but it is not clear how fear generalization
              influences other cognitive processes. The current study
              investigated how associative fear learning impacts
              higher-order visuospatial attention, specifically in terms
              of attentional bias toward generalized threats (i.e., the
              heightened assessment of potentially dangerous stimuli). We
              combined discriminative fear conditioning of color stimuli
              with a subsequent visual search task, in which targets and
              distractors were presented inside colored circles that
              varied in perceptual similarity to the fear-conditioned
              color. Skin conductance responses validated the
              fear-conditioning manipulation. Search response times
              indicated that attention was preferentially deployed not
              just to the specific fear-conditioned color, but also to
              similar colors that were never paired with the aversive
              shock. Furthermore, this attentional bias decreased
              continuously and symmetrically from the fear-conditioned
              value along the color spectrum, indicating a generalization
              gradient based on perceptual similarity. These results
              support functional accounts of fear learning that promote
              broad, defensive generalization of attentional bias toward
              threat. (PsycINFO Database Record},
   Doi = {10.1037/emo0000197},
   Key = {fds322022}
}

@article{fds322023,
   Author = {Krasich, K and Ramger, B and Holton, L and Wang, L and Mitroff, SR and Appelbaum, LG},
   Title = {Sensorimotor Learning in a Computerized Athletic Training
              Battery.},
   Journal = {Journal of motor behavior},
   Volume = {48},
   Number = {5},
   Pages = {401--412},
   Year = {2016},
   Month = sep,
   url = {http://dx.doi.org/10.1080/00222895.2015.1113918},
   Abstract = {Sensorimotor abilities are crucial for performance in
              athletic, military, and other occupational activities, and
              there is great interest in understanding learning in these
              skills. Here, behavioral performance was measured over three
              days as twenty-seven participants practiced multiple
              sessions on the Nike SPARQ Sensory Station (Nike, Inc.,
              Beaverton, Oregon), a computerized visual and motor
              assessment battery. Wrist-worn actigraphy was recorded to
              monitor sleep-wake cycles. Significant learning was observed
              in tasks with high visuomotor control demands but not in
              tasks of visual sensitivity. Learning was primarily linear,
              with up to 60\% improvement, but did not relate to sleep
              quality in this normal-sleeping population. These results
              demonstrate differences in the rate and capacity for
              learning across perceptual and motor domains, indicating
              potential targets for sensorimotor training
              interventions.},
   Doi = {10.1080/00222895.2015.1113918},
   Key = {fds322023}
}

@article{fds302524,
   Author = {Biggs, AT and Mitroff, SR},
   Title = {Differences in multiple-target visual search performance
              between non-professional and professional searchers due to
              decision-making criteria.},
   Journal = {British Journal of Psychology},
   Volume = {106},
   Number = {4},
   Pages = {551--563},
   Year = {2015},
   Month = nov,
   ISSN = {0007-1269},
   url = {http://dx.doi.org/10.1111/bjop.12096},
   Abstract = {Professional visual searches, such as those conducted by
              airport security personnel, often demand highly accurate
              performance. As many factors can hinder accuracy, it is
              critical to understand the potential influences. Here, we
              examined how explicit decision-making criteria might affect
              multiple-target search performance. Non-professional
              searchers (college undergraduates) and professional
              searchers (airport security officers) classified trials as
              'safe' or 'dangerous', in one of two conditions. Those in
              the 'one = dangerous' condition classified trials as
              dangerous if they found one or two targets, and those in the
              'one = safe' condition only classified trials as dangerous
              if they found two targets. The data suggest an important
              role of context that may be mediated by experience;
              non-professional searchers were more likely to miss a second
              target in the one = dangerous condition (i.e., when
              finding a second found target did not change the
              classification), whereas professional searchers were more
              likely to miss a second in the one = safe
              condition.},
   Doi = {10.1111/bjop.12096},
   Key = {fds302524}
}

@article{fds322519,
   Author = {Adamo, SH and Cain, MS and Mitroff, SR},
   Title = {Satisfaction at last: Evidence for the ``satisfaction''
              hypothesis for multiple-target search errors},
   Journal = {Visual Cognition},
   Volume = {23},
   Number = {7},
   Pages = {821--825},
   Year = {2015},
   Month = aug,
   url = {http://dx.doi.org/10.1080/13506285.2015.1093248},
   Doi = {10.1080/13506285.2015.1093248},
   Key = {fds322519}
}

@article{fds302516,
   Author = {Biggs, AT and Cain, MS and Mitroff, SR},
   Title = {Cognitive Training Can Reduce Civilian Casualties in a
              Simulated Shooting Environment.},
   Journal = {Psychological Science},
   Volume = {26},
   Number = {8},
   Pages = {1164--1176},
   Year = {2015},
   Month = aug,
   ISSN = {0956-7976},
   url = {http://dx.doi.org/10.1177/0956797615579274},
   Abstract = {Shooting a firearm involves a complex series of cognitive
              abilities. For example, locating an item or a person of
              interest requires visual search, and firing the weapon (or
              withholding a trigger squeeze) involves response execution
              (or inhibition). The present study used a simulated shooting
              environment to establish a relationship between a particular
              cognitive ability and a critical shooting error-response
              inhibition and firing on civilians, respectively.
              Individual-difference measures demonstrated, perhaps
              counterintuitively, that simulated civilian casualties were
              not related to motor impulsivity (i.e., an itchy trigger
              finger) but rather to an individual's cognitive ability to
              withhold an already initiated response (i.e., an itchy
              brain). Furthermore, active-response-inhibition training
              reduced simulated civilian casualties, which revealed a
              causal relationship. This study therefore illustrates the
              potential of using cognitive training to possibly improve
              shooting performance, which might ultimately provide insight
              for military and law-enforcement personnel.},
   Doi = {10.1177/0956797615579274},
   Key = {fds302516}
}

@article{fds302519,
   Author = {Wang, L and Krasich, K and Bel-Bahar, T and Hughes, L and Mitroff, SR and Appelbaum, LG},
   Title = {Mapping the structure of perceptual and visual-motor
              abilities in healthy young adults.},
   Journal = {Acta Psychologica},
   Volume = {157},
   Pages = {74--84},
   Year = {2015},
   Month = may,
   ISSN = {0001-6918},
   url = {http://hdl.handle.net/10161/10643},
   internal-note = {Duke open access},
   Abstract = {The ability to quickly detect and respond to visual stimuli
              in the environment is critical to many human activities.
              While such perceptual and visual-motor skills are important
              in a myriad of contexts, considerable variability exists
              between individuals in these abilities. To better understand
              the sources of this variability, we assessed perceptual and
              visual-motor skills in a large sample of 230 healthy
              individuals via the Nike SPARQ Sensory Station, and compared
              variability in their behavioral performance to demographic,
              state, sleep and consumption characteristics. Dimension
              reduction and regression analyses indicated three underlying
              factors: Visual-Motor Control, Visual Sensitivity, and Eye
              Quickness, which accounted for roughly half of the overall
              population variance in performance on this battery.
              Inter-individual variability in Visual-Motor Control was
              correlated with gender and circadian patters such that
              performance on this factor was better for males and for
              those who had been awake for a longer period of time before
              assessment. The current findings indicate that abilities
              involving coordinated hand movements in response to stimuli
              are subject to greater individual variability, while visual
              sensitivity and occulomotor control are largely stable
              across individuals.},
   Doi = {10.1016/j.actpsy.2015.02.005},
   Key = {fds302519}
}

@article{fds302517,
   Author = {Clark, K and Appelbaum, LG and van den Berg, B and Mitroff, SR and Woldorff, MG},
   Title = {Improvement in visual search with practice: mapping
              learning-related changes in neurocognitive stages of
              processing.},
   Journal = {The Journal of neuroscience : the official journal of the
              Society for Neuroscience},
   Volume = {35},
   Number = {13},
   Pages = {5351--5359},
   Year = {2015},
   Month = apr,
   ISSN = {0270-6474},
   url = {http://hdl.handle.net/10161/10641},
   internal-note = {Duke open access},
   Abstract = {Practice can improve performance on visual search tasks; the
              neural mechanisms underlying such improvements, however, are
              not clear. Response time typically shortens with practice,
              but which components of the stimulus-response processing
              chain facilitate this behavioral change? Improved search
              performance could result from enhancements in various
              cognitive processing stages, including (1) sensory
              processing, (2) attentional allocation, (3) target
              discrimination, (4) motor-response preparation, and/or (5)
              response execution. We measured event-related potentials
              (ERPs) as human participants completed a five-day
              visual-search protocol in which they reported the
              orientation of a color popout target within an array of
              ellipses. We assessed changes in behavioral performance and
              in ERP components associated with various stages of
              processing. After practice, response time decreased in all
              participants (while accuracy remained consistent), and
              electrophysiological measures revealed modulation of several
              ERP components. First, amplitudes of the early
              sensory-evoked N1 component at 150 ms increased bilaterally,
              indicating enhanced visual sensory processing of the array.
              Second, the negative-polarity posterior-contralateral
              component (N2pc, 170--250 ms) was earlier and larger,
              demonstrating enhanced attentional orienting. Third, the
              amplitude of the sustained posterior contralateral
              negativity component (SPCN, 300--400 ms) decreased,
              indicating facilitated target discrimination. Finally,
              faster motor-response preparation and execution were
              observed after practice, as indicated by latency changes in
              both the stimulus-locked and response-locked lateralized
              readiness potentials (LRPs). These electrophysiological
              results delineate the functional plasticity in key
              mechanisms underlying visual search with high temporal
              resolution and illustrate how practice influences various
              cognitive and neural processing stages leading to enhanced
              behavioral performance.},
   Doi = {10.1523/jneurosci.1152-14.2015},
   Key = {fds302517}
}

@article{fds302518,
   Author = {Dowd, EW and Kiyonaga, A and Egner, T and Mitroff,
              SR},
   Title = {Attentional guidance by working memory differs by paradigm:
              an individual-differences approach.},
   Journal = {Attention, Perception, \& Psychophysics},
   Volume = {77},
   Number = {3},
   Pages = {704--712},
   Year = {2015},
   Month = apr,
   ISSN = {1943-3921},
   url = {http://dx.doi.org/10.3758/s13414-015-0847-z},
   Abstract = {The contents of working memory (WM) have been repeatedly
              found to guide the allocation of visual attention; in a
              dual-task paradigm that combines WM and visual search,
              actively holding an item in WM biases visual attention
              towards memory-matching items during search (e.g., Soto et
              al., Journal of Experimental Psychology: Human Perception
              and Performance, 31(2), 248-261, 2005). A key debate is
              whether such memory-based attentional guidance is automatic
              or under strategic control. Generally, two distinct task
              paradigms have been employed to assess memory-based
              guidance, one demonstrating that attention is involuntarily
              captured by memory-matching stimuli even at a cost to search
              performance (Soto et al., 2005), and one demonstrating that
              participants can strategically avoid memory-matching
              distractors to facilitate search performance (Woodman \&
              Luck, Journal of Experimental Psychology: Human Perception
              and Performance, 33(2), 363-377, 2007). The current study
              utilized an individual-differences approach to examine why
              the different paradigms--which presumably tap into the same
              attentional construct--might support contrasting
              interpretations. Participants completed a battery of
              cognitive tasks, including two types of attentional guidance
              paradigms (see Soto et al., 2005; Woodman \& Luck, 2007), a
              visual WM task, and an operation span task, as well as
              attention-related self-report assessments. Performance on
              the two attentional guidance paradigms did not correlate.
              Subsequent exploratory regression analyses revealed that
              memory-based guidance in each task was differentially
              predicted by visual WM capacity for one paradigm, and by
              attention-related assessment scores for the other paradigm.
              The current results suggest that these two paradigms--which
              have previously produced contrasting patterns of
              performance--may probe distinct aspects of attentional
              guidance.},
   Doi = {10.3758/s13414-015-0847-z},
   Key = {fds302518}
}

@article{fds302520,
   Author = {Biggs, AT and Adamo, SH and Dowd, EW and Mitroff,
              SR},
   Title = {Examining perceptual and conceptual set biases in
              multiple-target visual search.},
   Journal = {Attention, Perception, \& Psychophysics},
   Volume = {77},
   Number = {3},
   Pages = {844--855},
   Year = {2015},
   Month = apr,
   ISSN = {1943-3921},
   url = {http://dx.doi.org/10.3758/s13414-014-0822-0},
   Abstract = {Visual search is a common practice conducted countless times
              every day, and one important aspect of visual search is that
              multiple targets can appear in a single search array. For
              example, an X-ray image of airport luggage could contain
              both a water bottle and a gun. Searchers are more likely to
              miss additional targets after locating a first target in
              multiple-target searches, which presents a potential
              problem: If airport security officers were to find a water
              bottle, would they then be more likely to miss a gun? One
              hypothetical cause of multiple-target search errors is that
              searchers become biased to detect additional targets that
              are similar to a found target, and therefore become less
              likely to find additional targets that are dissimilar to the
              first target. This particular hypothesis has received
              theoretical, but little empirical, support. In the present
              study, we tested the bounds of this idea by utilizing "big
              data" obtained from the mobile application Airport Scanner.
              Multiple-target search errors were substantially reduced
              when the two targets were identical, suggesting that the
              first-found target did indeed create biases during
              subsequent search. Further analyses delineated the nature of
              the biases, revealing both a perceptual set bias (i.e., a
              bias to find additional targets with features similar to
              those of the first-found target) and a conceptual set bias
              (i.e., a bias to find additional targets with a conceptual
              relationship to the first-found target). These biases are
              discussed in terms of the implications for visual-search
              theories and applications for professional visual
              searchers.},
   Doi = {10.3758/s13414-014-0822-0},
   Key = {fds302520}
}

@article{fds322520,
   Author = {Mitroff, SR and Biggs, AT and Adamo, SH and Dowd, EW and Winkle, J and Clark, K},
   Title = {What can 1 billion trials tell us about visual
              search?},
   Journal = {Journal of Experimental Psychology: Human Perception and
              Performance},
   Volume = {41},
   Number = {1},
   Pages = {1--5},
   Publisher = {American Psychological Association Inc.},
   Year = {2015},
   Month = feb,
   url = {http://dx.doi.org/10.1037/xhp0000012},
   internal-note = {Apparent duplicate of entry fds326209 (same title,
              journal, volume, pages, and DOI, but different year/month) --
              verify and remove one},
   Abstract = {Mobile technology (e.g., smartphones and tablets) has
              provided psychologists with a wonderful opportunity: through
              careful design and implementation, mobile applications can
              be used to crowd source data collection. By garnering
              massive amounts of data from a wide variety of individuals,
              it is possible to explore psychological questions that have,
              to date, been out of reach. Here we discuss 2 examples of
              how data from the mobile game Airport Scanner (Kedlin Co.,
              http://www.airportscannergame.com) can be used to address
              questions about the nature of visual search that pose
              intractable problems for laboratory-based research. Airport
              Scanner is a successful mobile game with millions of unique
              users and billions of individual trials, which allows for
              examining nuanced visual search questions. The goals of the
              current Observation Report were to highlight the growing
              opportunity that mobile technology affords psychological
              research and to provide an example roadmap of how to
              successfully collect usable data.},
   Doi = {10.1037/xhp0000012},
   Key = {fds322520}
}

@article{fds302515,
   Author = {Biggs, AT and Adamo, SH and Mitroff, SR},
   Title = {Mo' money, mo' problems: Monetary motivation can exacerbate
              the attentional blink.},
   Journal = {Perception},
   Volume = {44},
   Number = {4},
   Pages = {410--422},
   Year = {2015},
   Month = jan,
   ISSN = {0301-0066},
   url = {http://dx.doi.org/10.1068/p7916},
   Abstract = {The attentional blink (AB) is a compelling psychological
              phenomenon wherein observers are less likely to identify a
              second target (T2) when it appears approximately 200 ms
              after a first target (T1) in a rapidly presented stream of
              items. The present investigation examined how monetary
              motivation could impact the AB when participants were
              differentially motivated to identify T1 versus T2.
              Participants completed one of three conditions where the
              only difference across conditions was a motivational
              manipulation: a standard AB task (control condition), a
              motivated condition with T1 worth double the points of T2,
              or a motivated condition with T1 worth half the points of T2
              (points in the motivated conditions were linked to a
              possible monetary bonus). Motivation had an expected
              influence on overall performance as both motivated
              conditions had higher overall T1 accuracy relative to the
              control condition. More specific to the question at hand,
              the AB was exacerbated (ie T2 performance was worse shortly
              after T1) when T1 was worth more than T2. This finding
              suggests that participants overallocated attentional
              resources to T1 processing at the expense of T2 processing,
              and it supports current theories of the AB.},
   Doi = {10.1068/p7916},
   Key = {fds302515}
}

@article{fds302522,
   Author = {Biggs, AT and Mitroff, SR},
   Title = {Improving the Efficacy of Security Screening Tasks: A Review
              of Visual Search Challenges and Ways to Mitigate Their
              Adverse Effects},
   Journal = {Applied Cognitive Psychology},
   Volume = {29},
   Number = {1},
   Pages = {142--148},
   Year = {2015},
   Month = jan,
   ISSN = {0888-4080},
   url = {http://dx.doi.org/10.1002/acp.3083},
   Doi = {10.1002/acp.3083},
   Key = {fds302522}
}

@article{fds322522,
   Author = {Adamo, SH and Cain, MS and Mitroff, SR},
   Title = {Targets Need Their Own Personal Space: Effects of Clutter on
              Multiple-Target Search Accuracy.},
   Journal = {Perception},
   Volume = {44},
   Number = {10},
   Pages = {1203--1214},
   Year = {2015},
   Month = jan,
   url = {http://dx.doi.org/10.1177/0301006615594921},
   Abstract = {Visual search is an essential task for many lifesaving
              professions; airport security personnel search baggage X-ray
              images for dangerous items and radiologists examine
              radiographs for tumors. Accuracy is critical for such
              searches; however, there are potentially negative influences
              that can affect performance; for example, the displays can
              be cluttered and can contain multiple targets. Previous
              research has demonstrated that clutter can hurt search
              performance and a second target is less likely to be
              detected in a multiple-target search after a first target
              has been found, which raises a concern-how does clutter
              affect multiple-target search performance? The current study
              explored clutter in a multiple-target search paradigm, where
              there could be one or two targets present, and targets
              appeared in varying levels of clutter. There was a
              significant interaction between clutter and target number:
              Increasing levels of clutter did not affect single-target
              detection but did reduce detection of a second target.
              Multiple-target search accuracy is known to be sensitive to
              contextual influences, and the current results reveal a
              specific effect wherein clutter disproportionally affected
              multiple-target search accuracy. These results suggest that
              the detection and processing of a first target might enhance
              the masking effects of clutter around a second
              target.},
   Doi = {10.1177/0301006615594921},
   Key = {fds322522}
}

@article{fds326209,
   Author = {Mitroff, SR and Biggs, AT and Adamo, SH and Dowd, EW and Winkle, J and Clark, K},
   Title = {What Can 1 Billion Trials Tell Us About Visual
             Search?},
   Journal = {Journal of Experimental Psychology: Human Perception and
             Performance},
   Volume = {41},
   Number = {1},
   Pages = {1--5},
   Publisher = {American Psychological Association Inc.},
   Year = {2014},
   Month = {December},
   url = {http://dx.doi.org/10.1037/xhp0000012},
   Abstract = {Mobile technology (e.g., smartphones and tablets) has
             provided psychologists with a wonderful opportunity: through
             careful design and implementation, mobile applications can
             be used to crowd source data collection. By garnering
             massive amounts of data from a wide variety of individuals,
             it is possible to explore psychological questions that have,
             to date, been out of reach. Here we discuss 2 examples of
             how data from the mobile game Airport Scanner (Kedlin Co.,
             http://www.airportscannergame.com) can be used to address
             questions about the nature of visual search that pose
             intractable problems for laboratory-based research. Airport
             Scanner is a successful mobile game with millions of unique
             users and billions of individual trials, which allows for
             examining nuanced visual search questions. The goals of the
             current Observation Report were to highlight the growing
             opportunity that mobile technology affords psychological
             research and to provide an example roadmap of how to
             successfully collect usable data. (PsycINFO Database
             Record},
   Doi = {10.1037/xhp0000012},
   Key = {fds326209}
}

@article{fds302525,
   Author = {Biggs, AT and Adamo, SH and Mitroff, SR},
   Title = {Rare, but obviously there: effects of target frequency and
             salience on visual search accuracy.},
   Journal = {Acta Psychologica},
   Volume = {152},
   Pages = {158--165},
   Year = {2014},
   Month = {October},
   ISSN = {0001-6918},
   url = {http://dx.doi.org/10.1016/j.actpsy.2014.08.005},
   Abstract = {Accuracy can be extremely important for many visual search
             tasks. However, numerous factors work to undermine
             successful search. Several negative influences on search
             have been well studied, yet one potentially influential
             factor has gone almost entirely unexplored-namely, how is
             search performance affected by the likelihood that a
             specific target might appear? A recent study demonstrated
             that when specific targets appear infrequently (i.e., once
             in every thousand trials) they were, on average, not often
             found. Even so, some infrequently appearing targets were
             actually found quite often, suggesting that the targets'
             frequency is not the only factor at play. Here, we
             investigated whether salience (i.e., the extent to which an
             item stands out during search) could explain why some
             infrequent targets are easily found whereas others are
             almost never found. Using the mobile application Airport
             Scanner, we assessed how individual target frequency and
             salience interacted in a visual search task that included a
             wide array of targets and millions of trials. Target
             frequency and salience were both significant predictors of
             search accuracy, although target frequency explained more of
             the accuracy variance. Further, when examining only the
             rarest target items (those that appeared on less than 0.15%
             of all trials), there was a significant relationship between
             salience and accuracy such that less salient items were less
             likely to be found. Beyond implications for search theory,
             these data suggest significant vulnerability for real-world
             searches that involve targets that are both infrequent and
             hard-to-spot.},
   Doi = {10.1016/j.actpsy.2014.08.005},
   Key = {fds302525}
}

@article{fds253046,
   Author = {Cain, MS and Biggs, AT and Darling, EF and Mitroff,
             SR},
   Title = {A little bit of history repeating: Splitting up
             multiple-target visual searches decreases second-target miss
             errors.},
   Journal = {Journal of Experimental Psychology: Applied},
   Volume = {20},
   Number = {2},
   Pages = {112--125},
   Year = {2014},
   Month = {June},
   ISSN = {1076-898X},
   url = {http://dx.doi.org/10.1037/xap0000014},
   Abstract = {Visual searches with several targets in a display have been
             shown to be particularly prone to miss errors in both
             academic laboratory searches and professional searches such
             as radiology and baggage screening. Specifically, finding 1
             target in a display can reduce the likelihood of detecting
             additional targets. This phenomenon was originally referred
             to as "satisfaction of search," but is referred to here as
             "subsequent search misses" (SSMs). SSM errors have been
             linked to a variety of causes, and recent evidence supports
             a working memory deficit wherein finding a target consumes
             working memory resources that would otherwise aid subsequent
             search for additional targets (Cain \& Mitroff, 2013). The
             current study demonstrated that dividing 1 multiple-target
             search into several single-target searches, separated by
             three to five unrelated trials, effectively freed the
             working memory resources used by the found target and
             eliminated SSM errors. This effect was demonstrated with
             both university community participants and with professional
             visual searchers from the Transportation Security
             Administration, suggesting it may be a generally applicable
             technique for improving multiple-target visual search
             accuracy.},
   Doi = {10.1037/xap0000014},
   Key = {fds253046}
}

@article{fds253051,
   Author = {Mitroff, SR and Biggs, AT},
   Title = {The ultra-rare-item effect: visual search for exceedingly
             rare items is highly susceptible to error.},
   Journal = {Psychological Science},
   Volume = {25},
   Number = {1},
   Pages = {284--289},
   Year = {2014},
   Month = {January},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/24270463},
   Abstract = {Accuracy is paramount in radiology and security screening,
             yet many factors undermine success. Target prevalence is a
             particularly worrisome factor, as targets are rarely present
             (e.g., the cancer rate in mammography is ~0.5%), and low
             target prevalence has been linked to increased search
             errors. More troubling is the fact that specific target
             types can have extraordinarily low frequency rates (e.g.,
             architectural distortions in mammography-a specific marker
             of potential cancer-appear in fewer than 0.05% of cases). By
             assessing search performance across millions of trials from
             the Airport Scanner smartphone application, we demonstrated
             that the detection of ultra-rare items was disturbingly
             poor. A logarithmic relationship between target detection
             and target frequency (adjusted R (2) = .92) revealed that
             ultra-rare items had catastrophically low detection rates
             relative to targets with higher frequencies. Extraordinarily
             low search performance for these extraordinarily rare
             targets-what we term the ultra-rare-item effect-is troubling
             given that radiological and security-screening searches are
             primarily ultra-rare-item searches.},
   Doi = {10.1177/0956797613504221},
   Key = {fds253051}
}

@article{fds253056,
   Author = {Clark, K and Cain, MS and Adcock, RA and Mitroff,
             SR},
   Title = {Context matters: The structure of task goals affects
             accuracy in multiple-target visual search},
   Journal = {Applied Ergonomics},
   Volume = {45},
   Number = {3},
   Pages = {528--533},
   Year = {2014},
   Month = {January},
   ISSN = {0003-6870},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23957930},
   Abstract = {Career visual searchers such as radiologists and airport
             security screeners strive to conduct accurate visual
             searches, but despite extensive training, errors still
             occur. A key difference between searches in radiology and
             airport security is the structure of the search task:
             Radiologists typically scan a certain number of medical
             images (fixed objective), and airport security screeners
             typically search X-rays for a specified time period (fixed
             duration). Might these structural differences affect
             accuracy? We compared performance on a search task
             administered either under constraints that approximated
             radiology or airport security. Some displays contained more
             than one target because the presence of multiple targets is
             an established source of errors for career searchers, and
             accuracy for additional targets tends to be especially
             sensitive to contextual conditions. Results indicate that
             participants searching within the fixed objective framework
             produced more multiple-target search errors; thus, adopting
             a fixed duration framework could improve accuracy for career
             searchers. © 2013 Elsevier Ltd and The Ergonomics
             Society.},
   Doi = {10.1016/j.apergo.2013.07.008},
   Key = {fds253056}
}

@article{fds253050,
   Author = {Biggs, AT and Mitroff, SR},
   Title = {Different predictors of multiple-target search accuracy
             between nonprofessional and professional visual
             searchers.},
   Journal = {Quarterly Journal of Experimental Psychology},
   Volume = {67},
   Number = {7},
   Pages = {1335--1348},
   Year = {2014},
   ISSN = {1747-0218},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/24266390},
   Abstract = {Visual search, locating target items among distractors,
             underlies daily activities ranging from critical tasks
             (e.g., looking for dangerous objects during security
             screening) to commonplace ones (e.g., finding your friends
             in a crowded bar). Both professional and nonprofessional
             individuals conduct visual searches, and the present
             investigation is aimed at understanding how they perform
             similarly and differently. We administered a multiple-target
             visual search task to both professional (airport security
             officers) and nonprofessional participants (members of the
             Duke University community) to determine how search abilities
             differ between these populations and what factors might
             predict accuracy. There were minimal overall accuracy
             differences, although the professionals were generally
             slower to respond. However, the factors that predicted
             accuracy varied drastically between groups; variability in
             search consistency-how similarly an individual searched from
             trial to trial in terms of speed-best explained accuracy for
             professional searchers (more consistent professionals were
             more accurate), whereas search speed-how long an individual
             took to complete a search when no targets were present-best
             explained accuracy for nonprofessional searchers (slower
             nonprofessionals were more accurate). These findings suggest
             that professional searchers may utilize different search
             strategies from those of nonprofessionals, and that search
             consistency, in particular, may provide a valuable tool for
             enhancing professional search accuracy.},
   Doi = {10.1080/17470218.2013.859715},
   Key = {fds253050}
}

@article{fds253055,
   Author = {Adamo, SH and Cain, MS and Mitroff, SR},
   Title = {Self-induced attentional blink: a cause of errors in
             multiple-target search.},
   Journal = {Psychological Science},
   Volume = {24},
   Number = {12},
   Pages = {2569--2574},
   Year = {2013},
   Month = {December},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/24142814},
   Abstract = {Satisfaction of search (which we refer to as subsequent
             search misses)-a decrease in accuracy at detecting a second
             target after a first target has been found in a visual
             search-underlies real-world search errors (e.g., tumors may
             be missed in an X-ray if another tumor already has been
             found), but little is known about this phenomenon's
             cognitive underpinnings. In the present study, we examined
             subsequent search misses in terms of another, more
             extensively studied phenomenon: the attentional blink, a
             decrease in accuracy when a second target appears 200 to 500
             ms after a first target is detected in a temporal stream.
             Participants searched for T-shaped targets among L-shaped
             distractors in a spatial visual search, and despite large
             methodological differences between self-paced spatial visual
             searches and attentional blink tasks, an
             attentional-blink-like effect accounted for
             subsequent-search-miss errors. This finding provides
             evidence that accuracy is negatively affected shortly after
             a first target is fixated in a self-paced, self-guided
             visual search.},
   Doi = {10.1177/0956797613497970},
   Key = {fds253055}
}

@article{fds253059,
   Author = {Dowd, EW and Mitroff, SR},
   Title = {Attentional guidance by working memory overrides salience
             cues in visual search.},
   Journal = {Journal of Experimental Psychology: Human Perception and
             Performance},
   Volume = {39},
   Number = {6},
   Pages = {1786--1796},
   Year = {2013},
   Month = {December},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23565744},
   Abstract = {Many factors influence visual search, including how much
             targets stand out (i.e., their visual salience) and whether
             they are currently relevant (i.e., Are they in working
             memory?). Although these are two known influences on search
             performance, it is unclear how they interact to guide
             attention. The present study explored this interplay by
             having participants hold an item in memory for a subsequent
             test while simultaneously conducting a multiple-target
             visual search. Importantly, the memory item could match one
             or neither of two targets from the search. In Experiment 1,
             when the memory item did not match either target,
             participants found a high-salience target first,
             demonstrating a baseline salience effect. This effect was
             exaggerated when a high-salience target was in working
             memory and completely reversed when a low-salience target
             was in memory, demonstrating a powerful influence of working
             memory guidance. Experiment 2 amplified the salience effect
             by including very high-salience, "pop-out"-like targets. Yet
             this salience effect was still attenuated when the memory
             item matched a less salient target. Experiment 3 confirmed
             these were memory-based effects and not priming.
             Collectively, these findings illustrate the influential role
             of working memory in guiding visual attention, even in the
             face of competing bottom-up salience cues.},
   Doi = {10.1037/a0032548},
   Key = {fds253059}
}

@article{fds253052,
   Author = {Jackson, TH and Mitroff, SR and Clark, K and Proffit, WR and Lee, JY and Nguyen, TT},
   Title = {Face symmetry assessment abilities: Clinical implications
             for diagnosing asymmetry.},
   Journal = {American Journal of Orthodontics and Dentofacial
             Orthopedics},
   Volume = {144},
   Number = {5},
   Pages = {663--671},
   Year = {2013},
   Month = {November},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/24182582},
   Abstract = {INTRODUCTION: An accurate assessment of face symmetry is
             necessary for the development of a dentofacial diagnosis in
             orthodontics, and an understanding of individual differences
             in perception of face symmetry between patients and
             providers is needed to facilitate successful treatment.
             METHODS: Orthodontists, general dentists, and control
             participants completed a series of tasks to assess symmetry.
             Judgments were made on pairs of upright faces (similar to
             the longitudinal assessment of photographic patient
             records), inverted faces, and dot patterns. Participants
             completed questionnaires regarding clinical practice,
             education level, and self-confidence ratings for symmetry
             assessment abilities. RESULTS: Orthodontists showed
             expertise compared with controls (P <0.001), whereas
             dentists showed no advantage over controls. Orthodontists
             performed better than dentists, however, in only the most
             difficult face symmetry judgments (P = 0.006). For both
             orthodontists and dentists, accuracy increased significantly
             when assessing symmetry in upright vs inverted faces (t =
             3.7, P = 0.001; t = 2.7, P = 0.02, respectively).
             CONCLUSIONS: Orthodontists showed expertise in assessing
             face symmetry compared with both laypersons and general
             dentists, and they were more accurate when judging upright
             than inverted faces. When using accurate longitudinal
             photographic records to assess changing face symmetry,
             orthodontists are likely to be incorrect in less than 15% of
             cases, suggesting that assistance from some additional
             technology is infrequently needed for diagnosis.},
   Doi = {10.1016/j.ajodo.2013.06.020},
   Key = {fds253052}
}

@article{fds253045,
   Author = {Harris, JA and Barack, DL and McMahon, AR and Mitroff, SR and Woldorff,
             MG},
   Title = {Object-Category Processing, Perceptual Awareness, and the
             Role of Attention during Motion-Induced Blindness},
   Pages = {97--106},
   Year = {2013},
   Month = {October},
   url = {http://dx.doi.org/10.1016/B978-0-12-398451-7.00008-7},
   Abstract = {Perceptual information represented in the brain, whether a
             viewer is aware of it or not, holds the potential to
             influence subsequent behavior. Here we tracked a
             well-established event-related-potential (ERP) measure of
             visual-object-category processing, the face-specific
             ventrolateral-occipital N170 response, across conditions of
             perceptual awareness. To manipulate perceptual awareness, we
             employed the motion-induced-blindness (MIB) paradigm, in
             which covertly attended, static, visual-target stimuli that
             are superimposed on a globally moving array of distractors
             perceptually disappear and reappear. Subjects responded with
             a button press when the target images (faces and houses)
             actually physically occurred (and thus perceptually
             appeared) and when they perceptually reappeared after an MIB
             episode. A comparison of the face-specific N170 ERP activity
             (face-vs-house responses) revealed robust face-selective ERP
             activity for physically appearing images and no such
             activity for perceptual reappearances following MIB
             episodes, suggesting that face-specific processing had
             continued uninterrupted during MIB. In addition,
             electrophysiological activity preceding an actual appearance
             of a target image, collapsed across face and house image
             types, was compared to that preceding the perceptual
             reappearance of a continuously present image (following
             MIB). This comparison revealed a parietally distributed
             positive-polarity response that preceded only reappearances
             following MIB. Such a result suggests a possible role of
             parietally mediated attentional capture by the
             present-but-suppressed target in the reestablishment of
             perceptual awareness at the end of an MIB episode. The
             present results provide insight into the level of visual
             processing that can occur in the absence of awareness, as
             well as into the mechanisms underlying MIB and its influence
             on perceptual awareness. © 2014 Elsevier Inc. All rights
             reserved.},
   Doi = {10.1016/B978-0-12-398451-7.00008-7},
   internal-note = {NOTE(review): no Journal or Booktitle field; the DOI prefix
             (10.1016/B978-...) suggests an Elsevier book chapter, so
             @incollection with a Booktitle may be the correct form --
             verify against the published source},
   Key = {fds253045}
}

@article{fds253071,
   Author = {Cain, MS and Mitroff, SR},
   Title = {Memory for found targets interferes with subsequent
             performance in multiple-target visual search.},
   Journal = {Journal of Experimental Psychology: Human Perception and
             Performance},
   Volume = {39},
   Number = {5},
   Pages = {1398--1408},
   Year = {2013},
   Month = {October},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23163788},
   Abstract = {Multiple-target visual searches--when more than 1 target can
             appear in a given search display--are commonplace in
             radiology, airport security screening, and the military.
             Whereas 1 target is often found accurately, additional
             targets are more likely to be missed in multiple-target
             searches. To better understand this decrement in 2nd-target
             detection, here we examined 2 potential forms of
             interference that can arise from finding a 1st target:
             interference from the perceptual salience of the 1st target
             (a now highly relevant distractor in a known location) and
             interference from a newly created memory representation for
             the 1st target. Here, we found that removing found targets
             from the display or making them salient and easily
             segregated color singletons improved subsequent search
             accuracy. However, replacing found targets with random
             distractor items did not improve subsequent search accuracy.
             Removing and highlighting found targets likely reduced both
             a target's visual salience and its memory load, whereas
             replacing a target removed its visual salience but not its
             representation in memory. Collectively, the current
             experiments suggest that the working memory load of a found
             target has a larger effect on subsequent search accuracy
             than does its perceptual salience.},
   Doi = {10.1037/a0030726},
   Key = {fds253071}
}

@article{fds253048,
   Author = {Cain, MS and Adamo, SH and Mitroff, SR},
   Title = {A taxonomy of errors in multiple-target visual
             search},
   Journal = {Visual Cognition},
   Volume = {21},
   Number = {7},
   Pages = {899--921},
   Year = {2013},
   Month = {August},
   ISSN = {1350-6285},
   url = {http://dx.doi.org/10.1080/13506285.2013.843627},
   Abstract = {Multiple-target visual searches are especially error prone;
             once one target is found, additional targets are likely to
             be missed. This phenomenon, often called satisfaction of
             search (which we refer to here as subsequent search misses;
             SSMs), is well known in radiology, despite no existing
             consensus about the underlying cause(s). Taking a cognitive
             laboratory approach, we propose that there are multiple
             causes of SSMs and present a taxonomy of SSMs based on
             searchers' eye movements during a multiple-target search
             task, including both previously identified and novel sources
             of SSMs. The types and distributions of SSMs revealed
             effects of working memory load, search strategy, and
             additional causal factors, suggesting that there is no
             single cause of SSMs. A multifaceted approach is likely
             needed to understand the psychological causes of SSMs and
             then to mitigate them in applied settings such as radiology
             and baggage screening. © 2013 Taylor \& Francis.},
   Doi = {10.1080/13506285.2013.843627},
   Key = {fds253048}
}

@article{fds253058,
   Author = {Appelbaum, LG and Cain, MS and Darling, EF and Mitroff,
             SR},
   Title = {Action video game playing is associated with improved visual
             sensitivity, but not alterations in visual sensory
             memory.},
   Journal = {Attention, Perception, \& Psychophysics},
   Volume = {75},
   Number = {6},
   Pages = {1161--1167},
   Year = {2013},
   Month = {August},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23709062},
   Abstract = {Action video game playing has been experimentally linked to
             a number of perceptual and cognitive improvements. These
             benefits are captured through a wide range of psychometric
             tasks and have led to the proposition that action video game
             experience may promote the ability to extract statistical
             evidence from sensory stimuli. Such an advantage could arise
             from a number of possible mechanisms: improvements in visual
             sensitivity, enhancements in the capacity or duration for
             which information is retained in visual memory, or
             higher-level strategic use of information for decision
             making. The present study measured the capacity and time
             course of visual sensory memory using a partial report
             performance task as a means to distinguish between these
             three possible mechanisms. Sensitivity measures and
             parameter estimates that describe sensory memory capacity
             and the rate of memory decay were compared between
             individuals who reported high levels and low levels of action
             video game experience. Our results revealed a uniform
             increase in partial report accuracy at all stimulus-to-cue
             delays for action video game players but no difference in
             the rate or time course of the memory decay. The present
             findings suggest that action video game playing may be
             related to enhancements in the initial sensitivity to visual
             stimuli, but not to a greater retention of information in
             iconic memory buffers.},
   Doi = {10.3758/s13414-013-0472-7},
   Key = {fds253058}
}

@article{fds220620,
   Author = {Mitroff, S. R. and Friesen, P. and Bennett, D. and Yoo, H. and Reichow, A.},
   Title = {Enhancing ice hockey skills through stroboscopic visual
             training—A pilot study},
   Journal = {Athletic Training & Sports Health Care},
   Volume = {5},
   Pages = {261--264},
   Year = {2013},
   url = {http://dx.doi.org/10.3928/19425864-20131030-02},
   Doi = {10.3928/19425864-20131030-02},
   Key = {fds220620}
}

@article{fds253053,
   Author = {Cain, MS and Adamo, SH and Mitroff, SR},
   Title = {A taxonomy of errors in multiple-target visual
             search},
   Journal = {Visual Cognition},
   Year = {2013},
   ISSN = {1350-6285},
   internal-note = {NOTE(review): appears to duplicate entry fds253048 (same
             authors, title, journal, and year) but lacks volume, number,
             pages, and DOI -- verify and merge or remove},
   Key = {fds253053}
}

@article{fds253054,
   Author = {Jackson, TH and Clark, K and Mitroff, SR},
   Title = {Enhanced facial symmetry assessment in orthodontists},
   Journal = {Visual Cognition},
   Volume = {21},
   Number = {7},
   Pages = {838--852},
   Year = {2013},
   ISSN = {1350-6285},
   url = {http://dx.doi.org/10.1080/13506285.2013.832450},
   Abstract = {Assessing facial symmetry is an evolutionarily important
             process, which suggests that individual differences in this
             ability should exist. As existing data are inconclusive, the
             current study explored whether a group trained in facial
             symmetry assessment, orthodontists, possessed enhanced
             abilities. Symmetry assessment was measured using face and
             nonface stimuli among orthodontic residents and two control
             groups: university participants with no symmetry training
             and airport security luggage screeners, a group previously
             shown to possess expert visual search skills unrelated to
             facial symmetry. Orthodontic residents were more accurate at
             assessing symmetry in both upright and inverted faces
             compared to both control groups, but not for nonface
             stimuli. These differences are not likely due to
             motivational biases or a speed-accuracy tradeoff-orthodontic
             residents were slower than the university participants but
             not the security screeners. Understanding such individual
             differences in facial symmetry assessment may inform the
             perception of facial attractiveness. © 2013 Taylor \&
             Francis.},
   Doi = {10.1080/13506285.2013.832450},
   Key = {fds253054}
}

@article{fds253057,
   Author = {Biggs, AT and Cain, MS and Clark, K and Darling, EF and Mitroff,
             SR},
   Title = {Assessing visual search performance differences between
             Transportation Security Administration Officers and
             nonprofessional visual searchers},
   Journal = {Visual Cognition},
   Volume = {21},
   Number = {3},
   Pages = {330--352},
   Year = {2013},
   ISSN = {1350-6285},
   url = {http://dx.doi.org/10.1080/13506285.2013.790329},
   Abstract = {Some visual searches depend upon accuracy (e.g., radiology,
             airport security screening), and it is important for both
             theoretical and applied reasons to understand what factors
             best predict performance. The current study administered a
             visual search task to both professional (Transportation
             Security Administration Officers) and nonprofessional
             (members of Duke University) searchers to examine group
             differences in which factors predict accuracy. Search
             speed-time taken to terminate search-was the primary
             predictor for nonprofessional searchers (accounting for 59%
             of their accuracy variability) and for the least experienced
             professional searchers (37% of variability). In contrast,
             consistency-how similarly (in terms of search speed) an
             individual spent searching from trial to trial-was the
             primary predictor for the most experienced professional
             visual searchers (39% of variability). These results inform
             cognitive theory by illuminating factors that differentially
             affect search performance between participants, and
             real-world issues by identifying search behaviours
             (consistency in particular) important to experienced
             professional searchers. © 2013 Copyright Taylor and Francis
             Group, LLC.},
   Doi = {10.1080/13506285.2013.790329},
   Key = {fds253057}
}

@article{fds253076,
   Author = {Appelbaum, LG and Cain, MS and Schroeder, JE and Darling, EF and Mitroff, SR},
   Title = {Stroboscopic visual training improves information encoding
             in short-term memory.},
   Journal = {Attention, Perception, \& Psychophysics},
   Volume = {74},
   Number = {8},
   Pages = {1681--1691},
   Year = {2012},
   Month = {November},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22810559},
   Abstract = {The visual system has developed to transform an
             undifferentiated and continuous flow of information into
             discrete and manageable representations, and this ability
             rests primarily on the uninterrupted nature of the input.
             Here we explore the impact of altering how visual
             information is accumulated over time by assessing how
             intermittent vision influences memory retention. Previous
             work has shown that intermittent, or stroboscopic, visual
             training (i.e., practicing while only experiencing snapshots
             of vision) can enhance visual-motor control and visual
             cognition, yet many questions remain unanswered about the
             mechanisms that are altered. In the present study, we used a
             partial-report memory paradigm to assess the possible
             changes in visual memory following training under
             stroboscopic conditions. In Experiment 1, the memory task
             was completed before and immediately after a training phase,
             wherein participants engaged in physical activities (e.g.,
             playing catch) while wearing either specialized stroboscopic
             eyewear or transparent control eyewear. In Experiment 2, an
             additional group of participants underwent the same
             stroboscopic protocol but were delayed 24 h between
             training and assessment, so as to measure retention. In
             comparison to the control group, both stroboscopic groups
             (immediate and delayed retest) revealed enhanced retention
             of information in short-term memory, leading to better
             recall at longer stimulus-to-cue delays (640-2,560 ms).
             These results demonstrate that training under stroboscopic
             conditions has the capacity to enhance some aspects of
             visual memory, that these faculties generalize beyond the
             specific tasks that were trained, and that trained
             improvements can be maintained for at least a
             day.},
   Doi = {10.3758/s13414-012-0344-6},
   Key = {fds253076}
}

@article{fds253073,
   Author = {Donohue, SE and Darling, EF and Mitroff, SR},
   Title = {Links between multisensory processing and
             autism.},
   Journal = {Experimental Brain Research},
   Volume = {222},
   Number = {4},
   Pages = {377--387},
   Year = {2012},
   Month = {October},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22923209},
   Abstract = {Autism spectrum disorder is typically associated with social
             deficits and is often specifically linked to difficulty with
             processing faces and other socially relevant stimuli.
             Emerging research has suggested that children with autism
             might also have deficits in basic perceptual abilities
             including multisensory processing (e.g., simultaneously
             processing visual and auditory inputs). The current study
             examined the relationship between multisensory temporal
             processing (assessed via a simultaneity judgment task
             wherein participants were to report whether a visual
             stimulus and an auditory stimulus occurred at the same time
             or at different times) and self-reported symptoms of autism
             (assessed via the Autism Spectrum Quotient questionnaire).
             Data from over 100 healthy adults revealed a relationship
             between these two factors as multisensory timing perception
             correlated with symptoms of autism. Specifically, a stronger
             bias to perceive auditory stimuli occurring before visual
             stimuli as simultaneous was associated with greater levels
             of autistic symptoms. Additional data and analyses confirm
             that this relationship is specific to multisensory
             processing and symptoms of autism. These results provide
             insight into the nature of multisensory processing while
             also revealing a continuum over which perceptual abilities
             correlate with symptoms of autism and that this continuum is
             not just specific to clinical populations but is present
             within the general population.},
   Doi = {10.1007/s00221-012-3223-4},
   Key = {fds253073}
}

@article{fds253074,
   Author = {Cain, MS and Vul, E and Clark, K and Mitroff, SR},
   Title = {A {Bayesian} optimal foraging model of human visual
             search.},
   Journal = {Psychological Science},
   Volume = {23},
   Number = {9},
   Pages = {1047--1054},
   Year = {2012},
   Month = {September},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22868494},
   Abstract = {Real-world visual searches often contain a variable and
             unknown number of targets. Such searches present difficult
             metacognitive challenges, as searchers must decide when to
             stop looking for additional targets, which results in high
             miss rates in multiple-target searches. In the study
             reported here, we quantified human strategies in
             multiple-target search via an ecological optimal foraging
             model and investigated whether searchers adapt their
             strategies to complex target-distribution statistics.
             Separate groups of individuals searched displays with the
             number of targets per trial sampled from different geometric
             distributions but with the same overall target prevalence.
             As predicted by optimal foraging theory, results showed that
             individuals searched longer when they expected more targets
             to be present and adjusted their expectations on-line during
             each search by taking into account the higher-order,
             across-trial target distributions. However, compared with
             modeled ideal observers, participants systematically
             responded as if the target distribution were more uniform
             than it was, which suggests that training could improve
             multiple-target search performance.},
   Doi = {10.1177/0956797612440460},
   Key = {fds253074}
}

@article{fds253075,
   Author = {Donohue, SE and James, B and Eslick, AN and Mitroff,
             SR},
   Title = {Cognitive pitfall! Videogame players are not immune to
             dual-task costs.},
   Journal = {Attention, Perception, \& Psychophysics},
   Volume = {74},
   Number = {5},
   Pages = {803--809},
   Year = {2012},
   Month = {July},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22669792},
   Abstract = {With modern technological advances, we often find ourselves
             dividing our attention between multiple tasks. While this
             may seem a productive way to live, our attentional capacity
             is limited, and this yields costs in one or more of the many
             tasks that we try to do. Some people believe that they are
             immune to the costs of multitasking and commonly engage in
             potentially dangerous behavior, such as driving while
             talking on the phone. But are some groups of individuals
             indeed immune to dual-task costs? This study examines
             whether avid action videogame players, who have been shown
             to have heightened attentional capacities, are particularly
             adept multitaskers. Participants completed three visually
             demanding experimental paradigms (a driving videogame, a
             multiple-object-tracking task, and a visual search), with
             and without answering unrelated questions via a speakerphone
             (i.e., with and without a dual-task component). All of the
             participants, videogame players and nonvideogame players
             alike, performed worse while engaging in the additional dual
             task for all three paradigms. This suggests that extensive
             videogame experience may not offer immunity from dual-task
             costs.},
   Doi = {10.3758/s13414-012-0323-y},
   Key = {fds253075}
}

@article{fds253060,
   Author = {Clark, K and Cain, MS and Adamo, SH and Mitroff, SR},
   Title = {Overcoming hurdles in translating visual search research
             between the lab and the field.},
   Journal = {Nebraska Symposium on Motivation},
   Volume = {59},
   Pages = {147--181},
   Year = {2012},
   ISSN = {0146-7875},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23437633},
   Abstract = {Research in visual search can be vital to improving
             performance in careers such as radiology and airport
             security screening. In these applied, or "field," searches,
             accuracy is critical, and misses are potentially fatal;
             however, despite the importance of performing optimally,
             radiological and airport security searches are nevertheless
             flawed. Extensive basic research in visual search has
             revealed cognitive mechanisms responsible for successful
             visual search as well as a variety of factors that tend to
             inhibit or improve performance. Ideally, the knowledge
             gained from such laboratory-based research could be directly
             applied to field searches, but several obstacles stand in
             the way of straightforward translation; the tightly
             controlled visual searches performed in the lab can be
             drastically different from field searches. For example, they
             can differ in terms of the nature of the stimuli, the
             environment in which the search is taking place, and the
             experience and characteristics of the searchers themselves.
             The goal of this chapter is to discuss these differences and
             how they can present hurdles to translating lab-based
             research to field-based searches. Specifically, most search
             tasks in the lab entail searching for only one target per
             trial, and the targets occur relatively frequently, but
             field searches may contain an unknown and unlimited number
             of targets, and the occurrence of targets can be rare.
             Additionally, participants in lab-based search experiments
             often perform under neutral conditions and have no formal
             training or experience in search tasks; conversely, career
             searchers may be influenced by the motivation to perform
             well or anxiety about missing a target, and they have
             undergone formal training and accumulated significant
             experience searching. This chapter discusses recent work
             that has investigated the impacts of these differences to
             determine how each factor can influence search performance.
             Knowledge gained from the scientific exploration of search
             can be applied to field searches but only when considering
             and controlling for the differences between lab and
             field.},
   Key = {fds253060}
}

@article{fds253067,
   Author = {Appelbaum, LG and Cain, MS and Darling, EF and Stanton, SJ and Nguyen,
             MT and Mitroff, SR},
   Title = {Corrigendum to ``What is the identity of a sports
             spectator?'' [Personality and Individual Differences 52
             (2012) 422--427]},
   Journal = {Personality and Individual Differences},
   Volume = {52},
   Number = {7},
   Pages = {862},
   Year = {2012},
   ISSN = {0191-8869},
   url = {http://dx.doi.org/10.1016/j.paid.2011.12.021},
   Doi = {10.1016/j.paid.2011.12.021},
   Key = {fds253067}
}

@article{fds253068,
   Author = {Adamo, SH and Cain, MS and Mitroff, SR},
   Title = {Self-induced attentional blink: A cause of errors in
             multiple-target visual search},
   Journal = {Visual Cognition},
   Volume = {20},
   Number = {9},
   Pages = {1004--1007},
   Year = {2012},
   ISSN = {1350-6285},
   url = {http://dx.doi.org/10.1080/13506285.2012.726448},
   Doi = {10.1080/13506285.2012.726448},
   Key = {fds253068}
}

@article{fds253070,
   Author = {Salazar, E and Cain, MS and Darling, EF and Mitroff, SR and Carin,
             L},
   Title = {Inferring latent structure from mixed real and categorical
             relational data},
   Journal = {Proceedings of the 29th International Conference on Machine
             Learning, ICML 2012},
   Volume = {2},
   Pages = {1039--1046},
   Address = {Edinburgh, Scotland},
   Year = {2012},
   url = {http://hdl.handle.net/10161/8953},
   Note = {Duke open access},
   Abstract = {We consider analysis of relational data (a matrix), in which
             the rows correspond to subjects (e.g., people) and the
             columns correspond to attributes. The elements of the matrix
             may be a mix of real and categorical. Each subject and
             attribute is characterized by a latent binary feature
             vector, and an inferred matrix maps each row-column pair of
             binary feature vectors to an observed matrix element. The
             latent binary features of the rows are modeled via a
             multivariate Gaussian distribution with low-rank covariance
             matrix, and the Gaussian random variables are mapped to
             latent binary features via a probit link. The same type
             construction is applied jointly to the columns. The model
             infers latent, low-dimensional binary features associated
             with each row and each column, as well correlation structure
             between all rows and between all columns. Copyright 2012 by
             the author(s)/owner(s).},
   Key = {fds253070}
}

@article{fds253072,
   Author = {Smith, TQ and Mitroff, SR},
   Title = {Stroboscopic training enhances anticipatory
             timing},
   Journal = {International Journal of Exercise Science},
   Volume = {5},
   Number = {4},
   Pages = {344--353},
   Year = {2012},
   Key = {fds253072}
}

@article{fds253077,
   Author = {Appelbaum, LG and Cain, MS and Darling, EF and Stanton, SJ and Nguyen,
             MT and Mitroff, SR},
   Title = {What is the identity of a sports spectator?},
   Journal = {Personality and Individual Differences},
   Volume = {52},
   Number = {3},
   Pages = {422--427},
   Year = {2012},
   ISSN = {0191-8869},
   url = {http://dx.doi.org/10.1016/j.paid.2011.10.048},
   Abstract = {Despite the prominence of sports in contemporary society,
             little is known about the identity and personality traits of
             sports spectators. With a sample of 293 individuals, we
             examine four broad categories of factors that may explain
             variability in the reported amount of time spent watching
             sports. Using individual difference regression techniques,
             we explore the relationship between sports spectating and
             physiological measures (e.g., testosterone and cortisol),
             clinical self-report scales (ADHD and autism), personality
             traits (e.g., NEO "Big Five"), and pastime activities (e.g.,
             video game playing). Our results indicate that individuals
             who report higher levels of sports spectating tend to have
             higher levels of extraversion, and in particular excitement
             seeking and gregariousness. These individuals also engage
             more in complementary pastime activities, including
             participating in sports and exercise activities, watching
             TV/movies, and playing video games. Notably, no differences
             were observed in the clinical self-report scales, indicating
             no differences in reported symptoms of ADHD or autism for
             spectators and non-spectators. Likewise, no relationship was
             seen between baseline concentrations of testosterone or
             cortisol and sports spectating in our sample. These results
             provide an assessment of the descriptive personality
             dimensions of frequent sports spectators and provide a basic
             taxonomy of how these traits are expressed across the
             population. © 2011 Elsevier Ltd.},
   Doi = {10.1016/j.paid.2011.10.048},
   Key = {fds253077}
}

@article{fds253081,
   Author = {Wu, W and Tiesinga, PH and Tucker, TR and Mitroff, SR and Fitzpatrick,
             D},
   Title = {Dynamics of population response to changes of motion
             direction in primary visual cortex.},
   Journal = {The Journal of neuroscience : the official journal of the
             Society for Neuroscience},
   Volume = {31},
   Number = {36},
   Pages = {12767--12777},
   Year = {2011},
   Month = {September},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/21900556},
   Abstract = {The visual system is thought to represent the direction of
             moving objects in the relative activity of large populations
             of cortical neurons that are broadly tuned to the direction
             of stimulus motion, but how changes in the direction of a
             moving stimulus are represented in the population response
             remains poorly understood. Here we take advantage of the
             orderly mapping of direction selectivity in ferret primary
             visual cortex (V1) to explore how abrupt changes in the
             direction of a moving stimulus are encoded in population
             activity using voltage-sensitive dye imaging. For stimuli
             moving in a constant direction, the peak of the V1
             population response accurately represented the direction of
             stimulus motion, but following abrupt changes in motion
             direction, the peak transiently departed from the direction
             of stimulus motion in a fashion that varied with the
             direction offset angle and was well predicted from the
             response to the component directions. We conclude that
             cortical dynamics and population coding mechanisms combine
             to place constraints on the accuracy with which abrupt
             changes in direction of motion can be represented by
             cortical circuits.},
   Doi = {10.1523/JNEUROSCI.4307-10.2011},
   Key = {fds253081}
}

@article{fds253080,
   Author = {Cain, MS and Dunsmoor, JE and LaBar, KS and Mitroff,
             SR},
   Title = {Anticipatory anxiety hinders detection of a second target in
             dual-target search.},
   Journal = {Psychological Science},
   Volume = {22},
   Number = {7},
   Pages = {866--871},
   Year = {2011},
   Month = {July},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/21670427},
   Abstract = {Professional visual searches (e.g., baggage screenings,
             military searches, radiological examinations) are often
             conducted in high-pressure environments and require focus on
             multiple visual targets. Yet laboratory studies of visual
             search tend to be conducted in emotionally neutral settings
             with only one possible target per display. In the experiment
             reported here, we looked to better emulate high-pressure
             search conditions by presenting searchers with arrays that
             contained between zero and two targets while inducing
             anticipatory anxiety via a threat-of-shock paradigm. Under
             conditions of anticipatory anxiety, dual-target performance
             was negatively affected, but single-target performance and
             time on task were unaffected. These results suggest that
             multiple-target searches may be a more sensitive instrument
             to measure the effect of environmental factors on visual
             cognition than single-target searches are. Further, the
             effect of anticipatory anxiety was modulated by individual
             differences in state anxiety levels of participants prior to
             the experiment. These results have implications for both the
             laboratory study of visual search and the management and
             assessment of professional searchers.},
   Doi = {10.1177/0956797611412393},
   Key = {fds253080}
}

@article{fds253082,
   Author = {Clark, K and Fleck, MS and Mitroff, SR},
   Title = {Enhanced change detection performance reveals improved
             strategy use in avid action video game players.},
   Journal = {Acta Psychologica},
   Volume = {136},
   Number = {1},
   Pages = {67--72},
   Year = {2011},
   Month = {January},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/21062660},
   Abstract = {Recent research has shown that avid action video game
             players (VGPs) outperform non-video game players (NVGPs) on
             a variety of attentional and perceptual tasks. However, it
             remains unknown exactly why and how such differences arise;
             while some prior research has demonstrated that VGPs'
             improvements stem from enhanced basic perceptual processes,
             other work indicates that they can stem from enhanced
             attentional control. The current experiment used a
             change-detection task to explore whether top-down strategies
             can contribute to VGPs' improved abilities. Participants
             viewed alternating presentations of an image and a modified
             version of the image and were tasked with detecting and
             localizing the changed element. Consistent with prior claims
             of enhanced perceptual abilities, VGPs were able to detect
             the changes while requiring less exposure to the change than
             NVGPs. Further analyses revealed this improved change
             detection performance may result from altered strategy use;
             VGPs employed broader search patterns when scanning scenes
             for potential changes. These results complement prior
             demonstrations of VGPs' enhanced bottom-up perceptual
             benefits by providing new evidence of VGPs' potentially
             enhanced top-down strategic benefits.},
   Doi = {10.1016/j.actpsy.2010.10.003},
   Key = {fds253082}
}

@article{fds253078,
   Author = {Cain, MS and Mitroff, SR},
   Title = {Distractor filtering in media multitaskers.},
   Journal = {Perception},
   Volume = {40},
   Number = {10},
   Pages = {1183--1192},
   Year = {2011},
   ISSN = {0301-0066},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22308888},
   Abstract = {A growing amount of modern media is consumed simultaneously,
             a phenomenon known as 'media multitasking'. Individuals who
             regularly engage in this activity, heavy media multitaskers
             (HMMs), are more affected by irrelevant information that can
             intrude into a primary task than are light media
             multitaskers (LMMs--Ophir et al, 2009 Proceedings of the
             National Academy of Sciences of the USA 106 15583). However,
             the locus of this deficit is unknown, as previous research
             is consistent with both memory and attentional explanations.
             Here, we isolated attentional processes by employing a
             singleton distractor task with low working-memory demands.
             In this task, LMMs used top-down information to improve
             their performance, yet HMMs did not. This difference in
             performance in an established attentional capture task
             argues for the presence of attentional differences in HMMs
             and is consistent with the idea that HMMs maintain a wider
             attentional scope than LMMs, even when instructed
             otherwise.},
   Doi = {10.1068/p7017},
   Key = {fds253078}
}

@article{fds253079,
   Author = {Appelbaum, LG and Schroeder, JE and Cain, MS and Mitroff, SR},
   Title = {Improved Visual Cognition through Stroboscopic Training.},
   Journal = {Frontiers in Psychology},
   Volume = {2},
   Number = {276},
   Pages = {276},
   Year = {2011},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22059078},
   Abstract = {Humans have a remarkable capacity to learn and adapt, but surprisingly
             little research has demonstrated generalized learning in which new skills
             and strategies can be used flexibly across a range of tasks and contexts.
             In the present work we examined whether generalized learning could result
             from visual-motor training under stroboscopic visual conditions.
             Individuals were assigned to either an experimental condition that trained
             with stroboscopic eyewear or to a control condition that underwent
             identical training with non-stroboscopic eyewear. The training consisted
             of multiple sessions of athletic activities during which participants
             performed simple drills such as throwing and catching. To determine if
             training led to generalized benefits, we used computerized measures to
             assess perceptual and cognitive abilities on a variety of tasks before
             and after training. Computer-based assessments included measures of
             visual sensitivity (central and peripheral motion coherence thresholds),
             transient spatial attention (a useful field of view - dual task paradigm),
             and sustained attention (multiple-object tracking). Results revealed that
             stroboscopic training led to significantly greater re-test improvement in
             central visual field motion sensitivity and transient attention abilities.
             No training benefits were observed for peripheral motion sensitivity or
             peripheral transient attention abilities, nor were benefits seen for
             sustained attention during multiple-object tracking. These findings
             suggest that stroboscopic training can effectively improve some, but not
             all aspects of visual perception and attention.},
   Doi = {10.3389/fpsyg.2011.00276},
   Key = {fds253079}
}

@article{fds253087,
   Author = {Costello, MC and Madden, DJ and Shepler, AM and Mitroff, SR and Leber,
             AB},
   Title = {Age-related preservation of top-down control over
             distraction in visual search.},
   Journal = {Experimental Aging Research},
   Volume = {36},
   Number = {3},
   Pages = {249--272},
   Year = {2010},
   Month = {July},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/20544447},
   Abstract = {Visual search studies have demonstrated that older adults
             can have preserved or even increased top-down control over
             distraction. However, the results are mixed as to the extent
             of this age-related preservation. The present experiment
             assesses group differences in younger and older adults
             during visual search, with a task featuring two conditions
             offering varying degrees of top-down control over
             distraction. After controlling for generalized slowing, the
             analyses revealed that the age groups were equally capable
             of utilizing top-down control to minimize distraction.
             Furthermore, for both age groups, the distraction effect was
             manifested in a sustained manner across the reaction time
             distribution.},
   Doi = {10.1080/0361073X.2010.484719},
   Key = {fds253087}
}

@article{fds304704,
   Author = {Costello, MC and Madden, DJ and Mitroff, SR and Whiting,
             WL},
   Title = {Age-related decline of visual processing components in
             change detection.},
   Journal = {Psychology and Aging},
   Volume = {25},
   Number = {2},
   Pages = {356--368},
   Year = {2010},
   Month = {June},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/20545420},
   Abstract = {Previous research has suggested that an age-related decline
             in change detection may be due to older adults using a more
             conservative response criterion. However, this finding may
             reflect methodological limitations of the traditional change
             detection design, in which displays are presented
             continuously until a change is detected. Across 2
             experiments, the authors assessed adult age differences in a
             version of change detection that required a response after
             each pair of pre- and postchange displays, thus reducing the
             potential contribution of response criterion. Older adults
             performed worse than younger adults, committing more errors
             and requiring a greater number of display cycles for correct
             detection. These age-related performance declines were
             substantially reduced after controlling statistically for
             elementary perceptual speed. Search strategy was largely
             similar for the 2 age groups, but perceptual speed was less
             successful in accounting for age-related variance in
             detectability when a more precise spatial localization of
             change was required (Experiment 2). Thus, the negative
             effect of aging in the present tasks lies in a reduction of
             detection efficiency due largely to processing speed, though
             some strategy-level effects may also contribute. (PsycINFO
             Database Record (c) 2010 APA, all rights
             reserved).},
   Doi = {10.1037/a0017625},
   Key = {fds304704}
}

@article{fds304703,
   Author = {Donohue, SE and Woldorff, MG and Mitroff, SR},
   Title = {Video game players show more precise multisensory temporal
             processing abilities.},
   Journal = {Attention, Perception, \& Psychophysics},
   Volume = {72},
   Number = {4},
   Pages = {1120--1129},
   Year = {2010},
   Month = {May},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/20436205},
   Abstract = {Recent research has demonstrated enhanced visual attention
             and visual perception in individuals with extensive
             experience playing action video games. These benefits
             manifest in several realms, but much remains unknown about
             the ways in which video game experience alters perception
             and cognition. In the present study, we examined whether
             video game players' benefits generalize beyond vision to
             multisensory processing by presenting auditory and visual
             stimuli within a short temporal window to video game players
             and non-video game players. Participants performed two
             discrimination tasks, both of which revealed benefits for
             video game players: In a simultaneity judgment task, video
             game players were better able to distinguish whether simple
             visual and auditory stimuli occurred at the same moment or
             slightly offset in time, and in a temporal-order judgment
             task, they revealed an enhanced ability to determine the
             temporal sequence of multisensory stimuli. These results
             suggest that people with extensive experience playing video
             games display benefits that extend beyond the visual
             modality to also impact multisensory processing.},
   Doi = {10.3758/APP.72.4.1120},
   Key = {fds304703}
}

@article{fds253085,
   Author = {Fleck, MS and Samei, E and Mitroff, SR},
   Title = {Generalized "satisfaction of search": adverse influences on
             dual-target search accuracy.},
   Journal = {Journal of Experimental Psychology: Applied},
   Volume = {16},
   Number = {1},
   Pages = {60--71},
   Year = {2010},
   Month = {March},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/20350044},
   Abstract = {The successful detection of a target in a radiological
             search can reduce the detectability of a second target, a
             phenomenon termed satisfaction of search (SOS). Given the
             potential consequences, here we investigate the generality
             of SOS with the goal of simultaneously informing radiology,
             cognitive psychology, and nonmedical searches such as
             airport luggage screening. Ten experiments utilizing
             nonmedical searches and untrained searchers suggest that SOS
             is affected by a diverse array of factors, including (1) the
             relative frequency of different target types, (2) external
             pressures (reward and time), and (3) expectations about the
             number of targets present. Collectively, these experiments
             indicate that SOS arises when searchers have a biased
             expectation about the low likelihood of specific targets or
             events, and when they are under pressure to perform
             efficiently. This first demonstration of SOS outside of
             radiology implicates a general heuristic applicable to many
             kinds of searches. In an example like airport luggage
             screening, the current data suggest that the detection of an
             easy-to-spot target (e.g., a water bottle) might reduce
             detection of a hard-to-spot target (e.g., a box
             cutter).},
   Doi = {10.1037/a0018629},
   Key = {fds253085}
}

@article{fds253065,
   Author = {Hubal, R and Mitroff, SR and Cain, MS and Scott, B and DeWitt,
             R},
   Title = {Simulating a vigilance task: Extensible technology for
             baggage security assessment and training},
   Journal = {2010 IEEE International Conference on Technologies for
             Homeland Security, HST 2010},
   Pages = {543--548},
   Year = {2010},
   url = {http://dx.doi.org/10.1109/THS.2010.5654982},
   Abstract = {A number of homeland security occupations require vigilance
             to potentially subtle events in the environment, with high
             stakes for missing infrequent but consequential items.
             Sustained vigilance can be required for long periods of time
             or when sleep-deprived or physically inactive, compounding
             the difficulty of this task. Research on sustained vigilance
             has largely focused on tasks such as driving, air traffic
             control, medical screening, and military specialties, but
             the findings closely apply also to other homeland
             security-related occupations. A research area that has
             received relatively little attention, but is of critical
             importance to homeland security, involves the role of
             individual differences in vigilance. Prior research suggests
             that certain individuals are better than others at searching
             for rarely present targets over long time periods, yet what
             is driving this effect remains unclear. Further, it is not
             known whether or not sustained vigilance can be improved
             through training. This research team is studying two
             research questions: Are there individual differences in the
             inherent ability to sustain vigilance? and What are the most
             effective approaches for training and improving sustained
             vigilance for rare items or events?. The intent is to employ
             tasks (primarily visual identification and gross motor
             tests) that readily translate to the relevant homeland
             security occupations requiring sustained vigilance. © 2010
             IEEE.},
   Doi = {10.1109/THS.2010.5654982},
   Key = {fds253065}
}

@article{fds253084,
   Author = {Donohue, SE and Woldorff, MG and Mitroff, SR},
   Title = {Multisensory benefits of playing video games},
   Journal = {Attention, Perception, \& Psychophysics},
   Volume = {72},
   Number = {4},
   Pages = {1120--1129},
   Year = {2010},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/20436205},
   Abstract = {Recent research has demonstrated enhanced visual attention
             and visual perception in individuals with extensive
             experience playing action video games. These benefits
             manifest in several realms, but much remains unknown about
             the ways in which video game experience alters perception
             and cognition. In the present study, we examined whether
             video game players' benefits generalize beyond vision to
             multisensory processing by presenting auditory and visual
             stimuli within a short temporal window to video game players
             and non-video game players. Participants performed two
             discrimination tasks, both of which revealed benefits for
             video game players: In a simultaneity judgment task, video
             game players were better able to distinguish whether simple
             visual and auditory stimuli occurred at the same moment or
             slightly offset in time, and in a temporal-order judgment
             task, they revealed an enhanced ability to determine the
             temporal sequence of multisensory stimuli. These results
             suggest that people with extensive experience playing video
             games display benefits that extend beyond the visual
             modality to also impact multisensory processing.},
   Doi = {10.3758/APP.72.4.1120},
   internal-note = {NOTE(review): apparent duplicate of fds304703 (same
             DOI, volume, and pages; different title) — consider
             removing one of the two entries},
   Key = {fds253084}
}

@article{fds253086,
   Author = {Costello, MC and Madden, DJ and Mitroff, SR and Whiting,
             WL},
   Title = {Age-related decline of visual processing components in
             change detection},
   Journal = {Psychology \& Aging},
   Volume = {25},
   Number = {2},
   Pages = {256--268},
   Year = {2010},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/20545420},
   Abstract = {Previous research has suggested that an age-related decline
             in change detection may be due to older adults using a more
             conservative response criterion. However, this finding may
             reflect methodological limitations of the traditional change
             detection design, in which displays are presented
             continuously until a change is detected. Across 2
             experiments, the authors assessed adult age differences in a
             version of change detection that required a response after
             each pair of pre- and postchange displays, thus reducing the
             potential contribution of response criterion. Older adults
             performed worse than younger adults, committing more errors
             and requiring a greater number of display cycles for correct
             detection. These age-related performance declines were
             substantially reduced after controlling statistically for
             elementary perceptual speed. Search strategy was largely
             similar for the 2 age groups, but perceptual speed was less
             successful in accounting for age-related variance in
             detectability when a more precise spatial localization of
             change was required (Experiment 2). Thus, the negative
             effect of aging in the present tasks lies in a reduction of
             detection efficiency due largely to processing speed, though
             some strategy-level effects may also contribute. (PsycINFO
             Database Record (c) 2010 APA, all rights
             reserved).},
   Doi = {10.1037/a0017625},
   Key = {fds253086}
}

@article{fds253088,
   Author = {Jordan, KE and Clark, K and Mitroff, SR},
   Title = {See an object, hear an object file: Object correspondence
             transcends sensory modality},
   Journal = {Visual Cognition},
   Volume = {18},
   Number = {4},
   Pages = {492--503},
   Year = {2010},
   ISSN = {1350-6285},
   url = {http://dx.doi.org/10.1080/13506280903338911},
   Abstract = {An important task of perceptual processing is to parse
             incoming information into distinct units and to keep track
             of those units over time as the same, persisting
             representations. Within the study of visual perception,
             maintaining such persisting object representations is helped
             by "object files"-episodic representations that store (and
             update) information about objects' properties and track
             objects over time and motion via spatiotemporal information.
             Although object files are typically discussed as visual,
             here we demonstrate that object-file correspondence can be
             computed across sensory modalities. An object file can be
             initially formed with visual input and later accessed with
             corresponding auditory information, suggesting that object
             files may be able to operate at a multimodal level of
             perceptual processing. © 2010 Psychology Press, an imprint
             of the Taylor \& Francis Group, an Informa
             business.},
   Doi = {10.1080/13506280903338911},
   Key = {fds253088}
}

@article{fds253089,
   Author = {Wang, S-H and Mitroff, SR},
   Title = {Preserved visual representations despite change blindness in
             infants.},
   Journal = {Developmental Science},
   Volume = {12},
   Number = {5},
   Pages = {681--687},
   Year = {2009},
   Month = {September},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/19702760},
   Abstract = {Combining theoretical hypotheses of infant cognition and
             adult perception, we present evidence that infants can
             maintain visual representations despite their failure to
             detect a change. Infants under 12 months typically fail to
             notice a change to an object's height in a covering event.
             The present experiments demonstrated that 11-month-old
             infants can nevertheless maintain a viable representation of
             both the pre- and post-change heights despite their 'change
             blindness'. These results suggest that infants, like adults,
             can simultaneously maintain multiple representations, even
             if they do not optimally use them.},
   Doi = {10.1111/j.1467-7687.2008.00800.x},
   Key = {fds253089}
}

@article{fds253083,
   Author = {Dunsmoor, JE and Mitroff, SR and LaBar, KS},
   Title = {Generalization of conditioned fear along a dimension of
             increasing fear intensity.},
   Journal = {Learning \& memory (Cold Spring Harbor, N.Y.)},
   Volume = {16},
   Number = {7},
   Pages = {460--469},
   Year = {2009},
   Month = {July},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/19553384},
   Abstract = {The present study investigated the extent to which fear
             generalization in humans is determined by the amount of fear
             intensity in nonconditioned stimuli relative to a
             perceptually similar conditioned stimulus. Stimuli consisted
             of graded emotionally expressive faces of the same identity
             morphed between neutral and fearful endpoints. Two
             experimental groups underwent discriminative fear
             conditioning between a face stimulus of 55% fear intensity
             (conditioned stimulus, CS+), reinforced with an electric
             shock, and a second stimulus that was unreinforced (CS-). In
             Experiment 1 the CS- was a relatively neutral face stimulus,
             while in Experiment 2 the CS- was the most fear-intense
             stimulus. Before and following fear conditioning, skin
             conductance responses (SCR) were recorded to different morph
             values along the neutral-to-fear dimension. Both
             experimental groups showed gradients of generalization
             following fear conditioning that increased with the fear
             intensity of the stimulus. In Experiment 1 a peak shift in
             SCRs extended to the most fear-intense stimulus. In
             contrast, generalization to the most fear-intense stimulus
             was reduced in Experiment 2, suggesting that discriminative
             fear learning procedures can attenuate fear generalization.
             Together, the findings indicate that fear generalization is
             broadly tuned and sensitive to the amount of fear intensity
             in nonconditioned stimuli, but that fear generalization can
             come under stimulus control. These results reveal a novel
             form of fear generalization in humans that is not merely
             based on physical similarity to a conditioned exemplar, and
             may have implications for understanding generalization
             processes in anxiety disorders characterized by heightened
             sensitivity to nonthreatening stimuli.},
   Doi = {10.1101/lm.1431609},
   Key = {fds253083}
}

@article{fds253090,
   Author = {Mitroff, SR and Arita, JT and Fleck, MS},
   Title = {Staying in bounds: Contextual constraints on object-file
             coherence.},
   Journal = {Visual Cognition},
   Volume = {17},
   Number = {1-2},
   Pages = {195--211},
   Year = {2009},
   ISSN = {1350-6285},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/19498955},
   Abstract = {Coherent visual perception necessitates the ability to track
             distinct objects as the same entities over time and motion.
             Calculations of such object persistence appear to be fairly
             automatic and constrained by specific rules. We explore the
             nature of object persistence here within the object-file
             framework; object files are mid-level visual representations
             that track entities over time and motion as the same
             persisting objects and store and update information about
             the objects. We present three new findings. First, object
             files are constrained by the principle of "boundedness";
             persisting entities should maintain a single closed contour.
             Second, object files are constrained by the principle of
             "containment"; all the parts and properties of a persisting
             object should reside within, and be connected to, the object
             itself. Third, object files are sensitive to the context in
             which an object appears; the very same physical entity that
             can instantiate object-file formation in one experimental
             context cannot in another. This contextual influence
             demonstrates for the first time that object files are
             sensitive to more than just the physical properties
             contained within any given visual display.},
   Doi = {10.1080/13506280802103457},
   Key = {fds253090}
}

@article{fds253064,
   Author = {Cheries, EW and Mitroff, SR and Wynn, K and Scholl,
             BJ},
   Title = {Cohesion as a constraint on object persistence in
             infancy.},
   Journal = {Developmental Science},
   Volume = {11},
   Number = {3},
   Pages = {427--432},
   Year = {2008},
   Month = {May},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18466376},
   Abstract = {A critical challenge for visual perception is to represent
             objects as the same persisting individuals over time and
             motion. Across several areas of cognitive science,
             researchers have identified cohesion as among the most
             important theoretical principles of object persistence: An
             object must maintain a single bounded contour over time.
             Drawing inspiration from recent work in adult visual
             cognition, the present study tested the power of cohesion as
             a constraint as it operates early in development. In
             particular, we tested whether the most minimal cohesion
             violation - a single object splitting into two - would
             destroy infants' ability to represent a quantity of objects
             over occlusion. In a forced-choice crawling paradigm, 10-
             and 12-month-old infants witnessed crackers being
             sequentially placed into containers, and typically crawled
             toward the container with the greater cracker quantity. When
             one of the crackers was visibly split in half, however,
             infants failed to represent the relative quantities, despite
             controls for the overall quantities and the motions
             involved. This result helps to characterize the fidelity and
             specificity of cohesion as a fundamental principle of object
             persistence, suggesting that even the simplest possible
             cohesion violation can dramatically impair infants' object
             representations and influence their overt
             behavior.},
   Doi = {10.1111/j.1467-7687.2008.00687.x},
   Key = {fds253064}
}

@article{fds253091,
   Author = {Cheries, E and Mitroff, SR and Wynn, K and Scholl,
             BJ},
   Title = {Cohesion as a principle of object persistence in
             infancy.},
   Journal = {Developmental Science},
   Volume = {11},
   Number = {3},
   Pages = {427--432},
   Year = {2008},
   internal-note = {NOTE(review): the original record put the page range
             in Number; issue number 3 taken from the matching record
             fds253064. Apparent duplicate of fds253064 (same journal,
             volume, pages, year) — consider removing one of the
             two entries},
   Key = {fds253091}
}

@article{fds253093,
   Author = {Mitroff, SR and Alvarez, GA},
   Title = {Space and time, not surface features, guide object
             persistence.},
   Journal = {Psychonomic Bulletin and Review},
   Volume = {14},
   Number = {6},
   Pages = {1199--1204},
   Year = {2007},
   Month = {December},
   ISSN = {1069-9384},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18229497},
   Abstract = {Successful visual perception relies on the ability to keep
             track of distinct entities as the same persisting objects
             from one moment to the next. This is a computationally
             difficult process and its underlying nature remains unclear.
             Here we use the object file framework to explore whether
             surface feature information (e.g., color, shape) can be used
             to compute such object persistence. From six experiments we
             find that spatiotemporal information (location as a function
             of time) easily determines object files, but surface
             features do not. The results suggest an unexpectedly strong
             constraint on the visual system's ability to compute online
             object persistence.},
   Key = {fds253093}
}

@article{fds253092,
   Author = {Fleck, MS and Mitroff, SR},
   Title = {Rare targets are rarely missed in correctable
             search.},
   Journal = {Psychological Science},
   Volume = {18},
   Number = {11},
   Pages = {943--947},
   Year = {2007},
   Month = {November},
   ISSN = {0956-7976},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/17958706},
   Abstract = {Failing to find a tumor in an x-ray scan or a gun in an
             airport baggage screening can have dire consequences, making
             it fundamentally important to elucidate the mechanisms that
             hinder performance in such visual searches. Recent
             laboratory work has indicated that low target prevalence can
             lead to disturbingly high miss rates in visual search. Here,
             however, we demonstrate that misses in low-prevalence
             searches can be readily abated. When targets are rarely
             present, observers adapt by responding more quickly, and
             miss rates are high. Critically, though, these misses are
             often due to response-execution errors, not perceptual or
             identification errors: Observers know a target was present,
             but just respond too quickly. When provided an opportunity
             to correct their last response, observers can catch their
             mistakes. Thus, low target prevalence may not be a
             generalizable cause of high miss rates in visual
             search.},
   Doi = {10.1111/j.1467-9280.2007.02006.x},
   Key = {fds253092}
}

@article{fds253094,
   Author = {Mitroff, SR and Scholl, BJ and Noles, NS},
   Title = {Object files can be purely episodic.},
   Journal = {Perception},
   Volume = {36},
   Number = {12},
   Pages = {1730--1735},
   Year = {2007},
   ISSN = {0301-0066},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18283924},
   Abstract = {Our ability to track an object as the same persisting entity
             over time and motion may primarily rely on spatiotemporal
             representations which encode some, but not all, of an
             object's features. Previous researchers using the 'object
             reviewing' paradigm have demonstrated that such
             representations can store featural information of
             well-learned stimuli such as letters and words at a highly
             abstract level. However, it is unknown whether these
             representations can also store purely episodic information
             (i.e. information obtained from a single, novel encounter)
             that does not correspond to pre-existing
             type-representations in long-term memory. Here, in an
             object-reviewing experiment with novel face images as
             stimuli, observers still produced reliable object-specific
             preview benefits in dynamic displays: a preview of a novel
             face on a specific object speeded the recognition of that
             particular face at a later point when it appeared again on
             the same object compared to when it reappeared on a
             different object (beyond display-wide priming), even when
             all objects moved to new positions in the intervening delay.
             This case study demonstrates that the mid-level visual
             representations which keep track of persisting identity over
             time--e.g. 'object files', in one popular framework--can
             store not only abstract types from long-term memory, but
             also specific tokens from online visual experience.},
   Doi = {10.1068/p5804},
   Key = {fds253094}
}

@article{fds253096,
   Author = {Mitroff, SR and Sobel, DM and Gopnik, A},
   Title = {Reversing how to think about ambiguous figure reversals:
             spontaneous alternating by uninformed observers.},
   Journal = {Perception},
   Volume = {35},
   Number = {5},
   Pages = {709--715},
   Year = {2006},
   ISSN = {0301-0066},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/16836059},
   Abstract = {Ambiguous figures are a special class of images that can
             give rise to multiple interpretations. Traditionally,
             switching between the possible interpretations of an
             ambiguous figure, or reversing one's interpretation, has
             been attributed either to top-down or to bottom-up processes
             (e.g. attributed to having knowledge of the nature of the
             ambiguity, or to a form of neuronal fatigue). Here we
             present evidence that is incompatible with both forms of
             explanations. Observers aged 5-9 years can reverse ambiguous
             figures when uninformed about the ambiguity, negating purely
             top-down explanations. Further, those children who make
             these 'spontaneous' reversals are more likely to succeed on
             a high-order theory-of-mind task, negating purely bottom-up
             explanations.},
   Doi = {10.1068/p5520},
   Key = {fds253096}
}

@article{fds253099,
   Author = {Mitroff, SR and Scholl, BJ and Wynn, K},
   Title = {The relationship between object files and conscious
             perception.},
   Journal = {Cognition},
   Volume = {96},
   Number = {1},
   Pages = {67--92},
   Year = {2005},
   Month = {May},
   ISSN = {0010-0277},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/15833307},
   Abstract = {Object files (OFs) are hypothesized mid-level
             representations which mediate our conscious perception of
             persisting objects-e.g. telling us 'which went where'.
             Despite the appeal of the OF framework, no previous
             research has directly explored whether OFs do indeed
             correspond to conscious percepts. Here we present at least
             one case wherein conscious percepts of 'which went where' in
             dynamic ambiguous displays diverge from the analogous
             correspondence computed by the OF system. Observers viewed a
             'bouncing/streaming' display in which two identical objects
             moved such that they could have either bounced off or
             streamed past each other. We measured two dependent
             variables: (1) an explicit report of perceived bouncing or
             streaming; and (2) an implicit 'object-specific preview
             benefit' (OSPB), wherein a 'preview' of information on a
             specific object speeds the recognition of that information
             at a later point when it appears again on the same object
             (compared to when it reappears on a different object),
             beyond display-wide priming. When the displays were
             manipulated such that observers had a strong bias to
             perceive streaming (on over 95% of the trials), there was
             nevertheless a strong OSPB in the opposite direction-such
             that the object files appeared to have 'bounced' even though
             the percept 'streamed'. Given that OSPBs have been taken as
             a hallmark of the operation of object files, the five
             experiments reported here suggest that in at least some
             specialized (and perhaps ecologically invalid) cases,
             conscious percepts of 'which went where' in dynamic
             ambiguous displays can diverge from the mapping computed by
             the object-file system.},
   Doi = {10.1016/j.cognition.2004.03.008},
   Key = {fds253099}
}

@article{fds253100,
   Author = {Mitroff, SR and Scholl, BJ},
   Title = {Forming and updating object representations without
             awareness: evidence from motion-induced blindness.},
   Journal = {Vision Research},
   Volume = {45},
   Number = {8},
   Pages = {961--967},
   Year = {2005},
   Month = {April},
   ISSN = {0042-6989},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/15695181},
   Abstract = {The input to visual processing consists of an
             undifferentiated array of features which must be parsed into
             discrete units. Here we explore the degree to which
             conscious awareness is important for forming such object
             representations, and for updating them in the face of
             changing visual scenes. We do so by exploiting the
             phenomenon of motion-induced blindness (MIB), wherein
             salient (and even attended) objects fluctuate into and out
             of conscious awareness when superimposed onto certain global
             motion patterns. By introducing changes to unseen visual
             stimuli during MIB, we demonstrate that object
             representations can be formed and updated even without
             conscious access to those objects. Such changes can then
             influence not only how stimuli reenter awareness, but also
             what reenters awareness. We demonstrate that this processing
             encompasses simple object representations and also several
             independent Gestalt grouping cues. We conclude that flexible
             visual parsing over time and visual change can occur even
             without conscious perception. Methodologically, we conclude
             that MIB may be an especially useful tool for studying the
             role of awareness in visual processing and vice
             versa.},
   Doi = {10.1016/j.visres.2004.09.044},
   Key = {fds253100}
}

@article{fds253095,
   Author = {Noles, NS and Scholl, BJ and Mitroff, SR},
   Title = {The persistence of object file representations.},
   Journal = {Perception \& psychophysics},
   Volume = {67},
   Number = {2},
   Pages = {324--334},
   Year = {2005},
   Month = {February},
   ISSN = {0031-5117},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/15973783},
   Abstract = {Coherent visual experience of dynamic scenes requires not
             only that the visual system segment scenes into component
             objects but that these object representations persist, so
             that an object can be identified as the same object from an
             earlier time. Object files (OFs) are visual representations
             thought to mediate such abilities: OFs lie between lower
             level sensory processing and higher level recognition, and
             they track salient objects over time and motion. OFs have
             traditionally been studied via object-specific preview
             benefits (OSPBs), in which discriminations of an object's
             features are speeded when an earlier preview of those
             features occurred on the same object, as opposed to on a
             different object, beyond general displaywide priming.
             Despite its popularity, many fundamental aspects of the OF
             framework remain unexplored. For example, although OFs are
             thought to be involved primarily in online visual
             processing, we do not know how long such representations
             persist; previous studies found OSPBs for up to 1500 msec
             but did not test for longer durations. We explored this
             issue using a modified object reviewing paradigm and found
             that robust OSPBs persist for more than five times longer
             than has previously been tested-for at least 8 sec, and
             possibly for much longer. Object files may be the "glue"
             that makes visual experience coherent not just in online
             moment-by-moment processing, but on the scale of seconds
             that characterizes our everyday perceptual experiences.
             These findings also bear on research in infant cognition,
             where OFs are thought to explain infants' abilities to track
             and enumerate small sets of objects over longer
             durations.},
   Key = {fds253095}
}

@article{fds253103,
   Author = {Mitroff, SR and Simons, DJ and Levin, DT},
   Title = {Nothing compares 2 views: change blindness can occur despite
             preserved access to the changed information.},
   Journal = {Perception \& psychophysics},
   Volume = {66},
   Number = {8},
   Pages = {1268--1281},
   Year = {2004},
   Month = {November},
   ISSN = {0031-5117},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/15813193},
   Abstract = {Change blindness, the failure to detect visual changes that
             occur during a disruption, has increasingly been used to
             infer the nature of internal representations. If every
             change were detected, detailed representations of the world
             would have to be stored and accessible. However, because
             many changes are not detected, visual representations might
             not be complete, and access to them might be limited. Using
             change detection to infer the completeness of visual
             representations requires an understanding of the reasons for
             change blindness. This article provides empirical support
             for one such reason: change blindness resulting from the
             failure to compare retained representations of both the pre-
             and postchange information. Even when unaware of changes,
             observers still retained information about both the pre- and
             postchange objects on the same trial.},
   Key = {fds253103}
}

% Journal article, Psychological Science (2004); Doi and pubmed url present.
% NOTE(review): fields follow this file's house style (quoted month names,
% single-hyphen page ranges); standard BibTeX would prefer month = jun and
% Pages = {420--425}, but the house style is kept for file-wide consistency.
@article{fds253102,
   Author = {Mitroff, SR and Scholl, BJ and Wynn, K},
   Title = {Divide and conquer: how object files adapt when a persisting
             object splits into two.},
   Journal = {Psychological Science},
   Volume = {15},
   Number = {6},
   Pages = {420-425},
   Year = {2004},
   Month = {June},
   ISSN = {0956-7976},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/15147497},
   Abstract = {Coherent visual experience requires not only segmenting
             incoming visual input into a structured scene of objects,
             but also binding discrete views of objects into dynamic
             representations that persist across time and motion.
             However, surprisingly little work has explored the
             principles that guide the construction and maintenance of
             such persisting object representations. What causes a part
             of the visual field to be treated as the same object over
             time? In the cognitive development literature, a key
             principle of object persistence is cohesion: An object must
             always maintain a single bounded contour. Here we
             demonstrate for the first time that mechanisms of adult
             midlevel vision are affected by cohesion violations. Using
             the object-file framework, we tested whether object-specific
             preview benefits-a hallmark of persisting object
             representations-are obtained for dynamic objects that split
             into two during their motion. We found that these preview
             benefits do not fully persist through such cohesion
             violations without incurring significant performance costs.
             These results illustrate how cohesion is employed as a
             constraint that guides the maintenance of object
             representations in adult midlevel vision.},
   Doi = {10.1111/j.0956-7976.2004.00695.x},
   Key = {fds253102}
}

% Journal article, Perception (2004).
% Fixed: the Doi field had a stray "no" suffix ({10.1068/p5341no}); the
% correct DOI, 10.1068/p5341, is confirmed by the duplicate record fds304702
% whose url resolves via dx.doi.org/10.1068/p5341.
@article{fds253101,
   Author = {Mitroff, SR and Scholl, BJ},
   Title = {Seeing the disappearance of unseen objects.},
   Journal = {Perception},
   Volume = {33},
   Number = {10},
   Pages = {1267-1273},
   Year = {2004},
   ISSN = {0301-0066},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/15693670},
   Abstract = {Because of the massive amount of incoming visual
             information, perception is fundamentally selective. We are
             aware of only a small subset of our visual input at any
             given moment, and a great deal of activity can occur right
             in front of our eyes without reaching awareness. While
             previous work has shown that even salient visual objects can
             go unseen, here we demonstrate the opposite pattern, wherein
             observers perceive stimuli which are not physically present.
             In particular, we show in two motion-induced blindness
             experiments that unseen objects can momentarily reenter
             awareness when they physically disappear: in some
             situations, you can see the disappearance of something you
             can't see. Moreover, when a stimulus changes outside of
             awareness in this situation and then physically disappears,
             observers momentarily see the altered version--thus
             perceiving properties of an object that they had never seen
             before, after that object is already gone. This phenomenon
             of 'perceptual reentry' yields new insights into the
             relationship between visual memory and conscious
             awareness.},
   Doi = {10.1068/p5341},
   Key = {fds253101}
}

% Journal article, Perception (2004); "Last but not least" is the journal's
% section label that the indexing service prepended to the title.
% Fixed: removed the space before the colon in the title, and normalized the
% abstract's hyphen to a double dash ("version--thus") to match the duplicate
% record fds253101.
% NOTE(review): this entry duplicates fds253101 (same article, same pages,
% same DOI) — consider removing one of the two records.
@article{fds304702,
   Author = {Mitroff, SR and Scholl, BJ},
   Title = {Last but not least: Seeing the disappearance of unseen
             objects},
   Journal = {Perception},
   Volume = {33},
   Number = {10},
   Pages = {1267-1273},
   Year = {2004},
   url = {http://dx.doi.org/10.1068/p5341},
   Abstract = {Because of the massive amount of incoming visual
             information, perception is fundamentally selective. We are
             aware of only a small subset of our visual input at any
             given moment, and a great deal of activity can occur right
             in front of our eyes without reaching awareness. While
             previous work has shown that even salient visual objects can
             go unseen, here we demonstrate the opposite pattern, wherein
             observers perceive stimuli which are not physically present.
             In particular, we show in two motion-induced blindness
             experiments that unseen objects can momentarily reenter
             awareness when they physically disappear: in some
             situations, you can see the disappearance of something you
             can't see. Moreover, when a stimulus changes outside of
             awareness in this situation and then physically disappears,
             observers momentarily see the altered version--thus
             perceiving properties of an object that they had never seen
             before, after that object is already gone. This phenomenon
             of 'perceptual reentry' yields new insights into the
             relationship between visual memory and conscious
             awareness.},
   Doi = {10.1068/p5341},
   Key = {fds304702}
}

% Journal of Vision (2003). Pages = {338a} is an abstract number, not a page
% range — presumably a published conference abstract rather than a full
% article; confirm before reclassifying the entry type.
@article{fds253062,
   Author = {Mitroff, SR and Scholl, BJ and Wynn, K},
   Title = {The relationship between object files and conscious
             perception},
   Journal = {Journal of Vision},
   Volume = {3},
   Number = {9},
   Pages = {338a},
   Year = {2003},
   url = {http://dx.doi.org/10.1167/3.9.338},
   Abstract = {Many aspects of mid-level vision appear to operate on the
             basis of representations which precede identification and
             recognition, but in which discrete objects are segmented
             from the background and tracked over time (unlike early
             sensory representations). It has become increasingly common
             to discuss such phenomena in terms of 'object files' (OFs) -
             critical mid-level representations which help mediate our
             conscious perception of persisting objects - e.g. telling us
             'which went where'. Despite the appeal of the OF framework,
             it remains unclear to what degree OFs underlie consciously
             perceived object trajectories. Here we present at least one
             case wherein conscious percepts of 'which went where' in
             dynamic displays diverge from the computation of 'which went
             where' in the OF system. Observers viewed an ambiguous
             'bouncing/streaming' display in which two identical objects
             moved such that they could have either streamed past or
             bounced off each other. We measured two dependent variables:
             (1) an explicit report of perceived bouncing or streaming;
             and (2) an implicit object-specific priming (OSP) measure,
             wherein a 'preview' of information on a specific object -
             e.g. a letter that flashes inside a small box - speeds the
             recognition of that letter at a later point when it appears
             again on the same box (compared to when it reappears on a
             different box). When the displays were manipulated such that
             observers had a strong bias to perceive streaming (on over
             90% of the trials), there was nevertheless a strong
             *negative* OSP associated with the streaming motion, such
             that the OSP appeared to have 'bounced' even though the
             percept 'streamed'. Given that OSP measures have been taken
             as a hallmark of the operation of object files, this
             suggests that in at least some cases conscious percepts of
             'which went where' in dynamic ambiguous displays can
             override the mapping computed by the object-file
             system.},
   Doi = {10.1167/3.9.338},
   Key = {fds253062}
}

% Journal article, JEP: Human Perception and Performance (2002).
% NOTE(review): unlike neighboring entries, this record has no Doi field,
% only a pubmed url — look up and add the article's DOI (APA journals of this
% era follow the 10.1037/{issn}.{vol}.{issue}.{firstpage} pattern; verify
% against the publisher record before adding).
@article{fds253097,
   Author = {Mitroff, SR and Simons, DJ and Franconeri, SL},
   Title = {The siren song of implicit change detection.},
   Journal = {Journal of Experimental Psychology: Human Perception and
             Performance},
   Volume = {28},
   Number = {4},
   Pages = {798-815},
   Year = {2002},
   Month = {August},
   ISSN = {0096-1523},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/12190251},
   Abstract = {Although change blindness could suggest that observers
             represent far less of their visual world than their
             conscious experience leads them to believe, they could fail
             to detect changes even if they fully represent all details.
             Reports of implicit change detection in the absence of
             awareness are consistent with the notion that observers'
             representations are more complete than previously thought.
             However, to provide convincing evidence, studies must
             separate implicit detection from explicit processes. This
             article reexamines the 3 primary claims of implicit change
             detection and, after replicating original findings, provides
             theoretical and empirical support for alternative, explicit
             explanations. Even if observers do represent more of the
             scene than previously thought, change detection might occur
             only through explicit comparisons.},
   Key = {fds253097}
}

% Journal article, Visual Cognition (2002).
% Fixed: title typo "explicity" -> "explicitly" (the published title behind
% DOI 10.1080/13506280143000476 reads "explicitly detected").
@article{fds253098,
   Author = {Mitroff, SR and Simons, DJ},
   Title = {Changes are not localized before they are explicitly
             detected},
   Journal = {Visual Cognition},
   Volume = {9},
   Number = {8},
   Pages = {937-968},
   Year = {2002},
   url = {http://dx.doi.org/10.1080/13506280143000476},
   Abstract = {Change detection is in many ways analogous to visual search.
             Yet, unlike search, successful detection depends not on the
             salience of features within a scene, but on the difference
             between the original and modified scene. If, as in search,
             preattentive mechanisms guide attention to the change
             location, the change itself must produce a preattentively
             detectable signal. Despite recent evidence for implicit
             representation of change in the absence of conscious
             detection, few studies have yet explored whether attention
             is guided to a change location prior to explicit detection.
             In four "change blindness" experiments using several
             variants of the "flicker" task, we tested the hypothesis
             that implicit or preattentive mechanisms guide change
             localization prior to explicit detection. None of the
             experiments revealed improved localization of changes prior
             to explicit reports of detection, suggesting that implicit
             detection of change does not contribute to the eventual
             explicit localization of a change. Instead, change
             localization is essentially arbitrary, driven by the
             salience of features within scenes.},
   Doi = {10.1080/13506280143000476},
   Key = {fds253098}
}

% Journal of Vision (2001). Pages = {7a} is an abstract number — presumably a
% published conference abstract (like fds253062); confirm before reclassifying.
@article{fds253061,
   Author = {Mitroff, SR and Simons, DJ},
   Title = {A lack of confidence in implicit change detection},
   Journal = {Journal of Vision},
   Volume = {1},
   Number = {3},
   Pages = {7a},
   Year = {2001},
   url = {http://dx.doi.org/10.1167/1.3.7},
   Abstract = {Purpose: Two recent studies suggest that explicit measures
             of change detection may overestimate change blindness.
             First, when observers reported no explicit awareness of a
             change, their response latency was still affected by the
             presence of the change (Williams and Simons 2000). Second,
             without explicit awareness of a change, the identity of a
             changed object influenced accuracy in a related judgment
             task (Thornton and Fernandez-Duque 2000). The current
             studies explore whether these findings provide evidence for
             implicit change detection without explicit awareness or
             whether they could result from explicit processing. Methods:
             In Experiment 1, observers reported whether or not they
             believed a display change occurred and then rated their
             confidence in their response. In Experiment 2, on every
             trial, observers first performed an orientation judgment
             task and then noted whether or not they had seen a change.
             To replicate earlier results, the position of the target of
             the orientation judgment was spatially linked to the
             position of the changed item. In our new condition, this
             spatial link was disrupted. Results: As in earlier studies,
             observers were quicker to respond 'same' when there was no
             change than when there was a change. However, they were also
             quicker to respond when they were more confident, and these
             differences in confidence accounted for the response time
             differences when there was or was not a change. In
             Experiment 2, when the position of the changed item and the
             target of the perceptual judgment were spatially linked, the
             identity of the changed item affected the judgment. Yet, no
             influence remained when the spatial link was de-coupled,
             suggesting the effect may be due to an explicit search
             strategy. Conclusion: Our results question the existence of
             an implicit comparison process that allows for change
             detection in the absence of explicit processing.},
   Doi = {10.1167/1.3.7},
   Key = {fds253061}
}


%% Chapters in Books   
% Book chapter (Cambridge Handbook of Applied Perception Research, 2015).
% NOTE(review): this file uses @misc for chapters; @incollection with an
% explicit Publisher field would be the more precise BibTeX type. The
% CBO-prefixed DOI and ISBN suggest Cambridge University Press — confirm
% before adding a Publisher field.
% NOTE(review): fds212287 appears to be an earlier, sparser record of this
% same chapter — consider merging.
@misc{fds322521,
   Author = {Clark, K and Cain, MS and Mitroff, SR},
   Title = {Perception and human information processing in visual
             search},
   Pages = {199-217},
   Booktitle = {The Cambridge Handbook of Applied Perception
             Research},
   Year = {2015},
   Month = {January},
   ISBN = {9780511973017},
   url = {http://dx.doi.org/10.1017/CBO9780511973017.016},
   Abstract = {© Cambridge University Press 2015. Visual search is the
             process of finding specific target items within an
             environment using particular visual features or prior
             knowledge. Searches can be as easy as finding your friend
             with purple hair in a lecture hall or as complicated as
             finding a purposefully concealed weapon among thousands of
             harmless bags at an airport checkpoint. Visual searches take
             place in everyday, innocuous contexts such as finding your
             car in a parking lot, and in critical contexts, such as
             finding enemy combatants in an urban battlefield. We conduct
             searches all the time, and most searches are relatively
             commonplace. However, in some cases, visual searches can be
             critically important. For example, airport security
             screeners must identify harmful items in baggage, and
             radiologists must identify abnormalities in medical
             radiographs. Despite the ubiquitous nature of search and the
             fact that it is sometimes life-or-death critical, human
             visual search is far from ideal - errors are often made, and
             searches are typically conducted for either too little or
             too much time. Thus, some fundamental research questions are
             the following: How can we maximize search efficiency? What
             is the best way to increase both search speed and accuracy?
             Much academic research has focused on increasing search
             performance, but does such research adequately translate to
             situations outside the laboratory environment? These open
             questions are the foundation of research in applied visual
             search - the application of what has been learned about
             search accuracy and efficiency from lab-based
             experimentation to search conditions in the workplace for
             career searchers, with the goal of increasing
             performance.},
   Doi = {10.1017/CBO9780511973017.016},
   Key = {fds322521}
}

% Book chapter (Cognitive Electrophysiology of Attention, Elsevier, 2014).
% Fixed: glued initials "Mitroff, S.R." -> "Mitroff, S. R." and
% "G.R. Mangun" -> "G. R. Mangun", matching the spaced-initial style of the
% other names in this entry (e.g. "Harris, J. A.").
@misc{fds220629,
   Author = {Harris, J. A. and Barrack, D. L. and McMahon, A. R. and Mitroff, S. R. and Woldorff, M. G.},
   Title = {Object-category processing during motion-induced blindness
             as revealed by electrophysiological recordings},
   Pages = {97-106},
   Booktitle = {Cognitive Electrophysiology of Attention: Signals of the
             Mind},
   Publisher = {Elsevier Academic Press},
   Address = {San Diego, CA},
   Editor = {G. R. Mangun},
   Year = {2014},
   Key = {fds220629}
}

% Book chapter (The Origins of Object Knowledge, 2012 record of the 2009
% book; abstract's copyright line says Oxford University Press, 2009).
% NOTE(review): fds154472 records a chapter by the same four authors in the
% same book — verify whether these are duplicate records of one chapter.
@misc{fds302521,
   Author = {Cheries, EW and Mitroff, SR and Wynn, K and Scholl,
             BJ},
   Title = {Do the same principles constrain persisting object
             representations in infant cognition and adult perception?:
             The cases of continuity and cohesion},
   Booktitle = {The Origins of Object Knowledge},
   Year = {2012},
   Month = {March},
   ISBN = {9780191696039},
   url = {http://dx.doi.org/10.1093/acprof:oso/9780199216895.003.0005},
   Abstract = {© Oxford University Press, 2009. All rights reserved. In
             recent years, the study of object persistence has undergone
             a major rebirth in the two fields of cognitive science -
             infant cognition and adult vision science. Given the
             difference between the two, some researchers have suggested
             that they may in fact be studying the same underlying mental
             processes. This idea promises to drive further progress by
             generating novel predictions that can then be tested in both
             fields. The authors in this chapter focus on the
             understanding of two core principles of persistence -
             continuity and cohesion. The initial explorations of both
             principles in infant cognition directly inspired research in
             adult vision science, which in turn sparked further and more
             specific explorations of the operation of these principles
             back in infant cognition. The case studies presented in this
             chapter highlight the benefits of explicitly and directly
             exploring how infant cognition research can inform adult
             perception research, and vice versa.},
   Doi = {10.1093/acprof:oso/9780199216895.003.0005},
   Key = {fds302521}
}

% NOTE(review): placeholder record — no Booktitle, Journal, or Pages fields;
% only author, title, and year are present. Complete the venue metadata if
% this work was ultimately published.
@misc{fds212288,
   Author = {Clark, K. and Cain, M. S. and Adamo, S. H. and Mitroff, S.
             R.},
   Title = {Examining influences on applied visual search
             performance},
   Year = {2012},
   Key = {fds212288}
}

% NOTE(review): same title and authors as fds322521 (the published 2015
% chapter) with a near-identical Booktitle — this looks like an earlier
% duplicate record; consider merging into fds322521.
@misc{fds212287,
   Author = {Clark, K. and Cain, M. S. and Mitroff, S. R.},
   Title = {Perception and human information processing in visual
             search},
   Booktitle = {Cambridge University Handbook on Applied Perception
             Research},
   Editor = {R. Hoffman and J. Szalma and P. Hancock and R. Parasuraman and M.
             Scerbo},
   Year = {2012},
   Key = {fds212287}
}

% Book chapter (The Origins of Object Knowledge, 2009).
% Fixed: the original Booktitle crammed the publisher and city into the title
% string ("... Knowledge. London: Oxford University Press."); that data now
% lives in the Publisher and Address fields, as in fds220629.
% NOTE(review): possibly the same chapter as fds302521 — verify.
@misc{fds154472,
   Author = {Cheries, E. and Mitroff, S. R. and Wynn, K. and Scholl, B.
             J.},
   Title = {Constraints on Persisting Object Representations in Infants
             and Adults.},
   Booktitle = {The Origins of Object Knowledge},
   Publisher = {Oxford University Press},
   Address = {London},
   Editor = {B. Hood and L. Santos},
   Year = {2009},
   Key = {fds154472}
}

% Book chapter (Perception of Faces, Objects, and Scenes; 2006 record, with
% the abstract's copyright line dating the book text to 2003).
% NOTE(review): fds139258 records the same chapter (same authors, same title,
% same book) with pages 335-351 — these appear to be duplicate records.
@misc{fds302523,
   Author = {Simons, DJ and Mitroff, SR and Franconeri, SL},
   Title = {Scene Perception: What We Can Learn from Visual Integration
             and Change Detection},
   Booktitle = {Perception of Faces, Objects, and Scenes: Analytic and
             Holistic Processes},
   Year = {2006},
   Month = {June},
   ISBN = {9780199848058},
   url = {http://dx.doi.org/10.1093/acprof:oso/9780195313659.003.0013},
   Abstract = {© 2003 by Oxford University Press, Inc. All rights
             reserved. Much of perception does not require that
             information be preserved from one view to the next. This
             chapter's review of the visual-integration and
             change-detection literature suggests that precise and
             complete visual representations may be unnecessary for the
             experience of a stable, continuous visual world. Instead,
             the experience of stability is driven by precise
             representations of the information needed to guide action,
             accompanied by an assumption that the properties of objects
             in the world are unlikely to change across views. Of course,
             more sensitive measures might reveal the existence of
             complete, precise representations of all aspects of the
             visual world, but such detailed representations are not
             needed to explain the experience of an unchanging world from
             one view to the next.},
   Doi = {10.1093/acprof:oso/9780195313659.003.0013},
   Key = {fds302523}
}

% NOTE(review): same chapter as fds302523 (same authors, title, and book) —
% apparent duplicate record; this one carries the page range and editors.
@misc{fds139258,
   Author = {Simons, D. J. and Mitroff, S. R. and Franconeri, S.
             L.},
   Title = {Scene perception: What we can learn from visual integration
             and change detection.},
   Pages = {335-351},
   Booktitle = {Perception of faces, objects, and scenes: Analytic and
             holistic processes. Advances in visual cognition},
   Editor = {M. Peterson and G. Rhodes},
   Year = {2003},
   Key = {fds139258}
}


%% Articles Submitted   
% Submitted manuscript (listed under "Articles Submitted"); no venue fields,
% consistent with an unpublished submission as of 2010.
@article{fds183242,
   Author = {Madden, D. J. and Mitroff, S. R. and Shepler, A. M. and Fleck, M. S. and Costello, M. and Voss, A.},
   Title = {Adult Age Differences in Top-Down Attentional Control During
             Rare Target Search.},
   Year = {2010},
   Key = {fds183242}
}


%% Other   
% Conference presentation/paper (I/ITSEC 2011).
% NOTE(review): the conference name and venue are stored in a Journal field;
% an @inproceedings entry with Booktitle and Address would be more precise.
@misc{fds198271,
   Author = {Hubal, R. and Cain, M. S. and Mitroff, S. R.},
   Title = {Simulating a vigilance task: Technology for homeland
             security research.},
   Journal = {Interservice/Industry Training, Simulation, and Education
             Conference (I/ITSEC). Orlando, FL.},
   Year = {2011},
   Key = {fds198271}
}

% Conference proceedings paper (CogSci 2011).
% NOTE(review): the proceedings title and venue are stored in a Journal
% field; an @inproceedings entry with Booktitle and Address would be more
% precise.
@misc{fds198272,
   Author = {Cain, M. S. and Vul, E. and Clark, K. and Mitroff, S.
             R.},
   Title = {Optimal models of human multiple-target visual
             search.},
   Journal = {Proceedings of the 33rd Annual Conference of the Cognitive
             Science Society. Boston, MA: Cognitive Science
             Society.},
   Year = {2011},
   Key = {fds198272}
}

% Research brief (Institute for Homeland Security Solutions, 2010).
% NOTE(review): the web address is embedded in the Journal field; a separate
% url field (with the Journal field holding only the series name) would be
% cleaner.
@misc{fds183238,
   Author = {Mitroff, S. R. and Hariri, A.},
   Title = {Identifying Predictive Markers of Field Performance: The
             Potential Role of Individual Differences in Threat
             Sensitivity},
   Journal = {Institute for Homeland Security Solutions Research Brief.
             https://www.ihssnc.org},
   Year = {2010},
   Key = {fds183238}
}

% Research brief (Institute for Homeland Security Solutions, 2010).
% Fixed: "Mitroff S. R." was missing its comma, so BibTeX's First-von-Last
% parsing would treat "R." as the surname; "Mitroff, S. R." parses correctly
% and matches the rest of the file.
@misc{fds183239,
   Author = {Madden, D. J. and Mitroff, S. R.},
   Title = {Aging and Top-Down Attentional Control in Visual
             Search},
   Journal = {Institute for Homeland Security Solutions Research Brief.
             https://www.ihssnc.org},
   Year = {2010},
   Key = {fds183239}
}


Duke University * Arts & Sciences * Faculty * Staff * Grad * Postdocs * Reload * Login