Publications of Guillermo Sapiro

%% Papers Published   
@article{fds370417,
   Author = {Isaev, DY and Vlasova, RM and Di Martino, JM and Stephen, CD and Schmahmann, JD and Sapiro, G and Gupta, AS},
   Title = {Uncertainty of Vowel Predictions as a Digital Biomarker for
             Ataxic Dysarthria.},
   Journal = {Cerebellum (London, England)},
   Volume = {23},
   Number = {2},
   Pages = {459-470},
   Year = {2024},
   Month = {April},
   url = {http://dx.doi.org/10.1007/s12311-023-01539-z},
   Abstract = {Dysarthria is a common manifestation across cerebellar
             ataxias leading to impairments in communication, reduced
             social connections, and decreased quality of life. While
             dysarthria symptoms may be present in other neurological
             conditions, ataxic dysarthria is a perceptually distinct
             motor speech disorder, with the most prominent
             characteristics being articulation and prosody abnormalities
             along with distorted vowels. We hypothesized that
             uncertainty of vowel predictions by an automatic speech
             recognition system can capture speech changes present in
             cerebellar ataxia. Speech of participants with ataxia (N=61)
             and healthy controls (N=25) was recorded during the "picture
             description" task. Additionally, participants' dysarthric
             speech and ataxia severity were assessed on a Brief Ataxia
             Rating Scale (BARS). Eight participants with ataxia had
             speech and BARS data at two timepoints. A neural network
             trained for phoneme prediction was applied to speech
             recordings. Average entropy of vowel tokens predictions
             (AVE) was computed for each participant's recording,
             together with mean pitch and intensity standard deviations
             (MPSD and MISD) in the vowel segments. AVE and MISD
             demonstrated associations with BARS speech score (Spearman's
             rho=0.45 and 0.51), and AVE demonstrated associations with
             BARS total (rho=0.39). In the longitudinal cohort, Wilcoxon
             pairwise signed rank test demonstrated an increase in BARS
             total and AVE, while BARS speech and acoustic measures did
             not significantly increase. Relationship of AVE to both BARS
             speech and BARS total, as well as the ability to capture
             disease progression even in absence of measured speech
             decline, indicates the potential of AVE as a digital
             biomarker for cerebellar ataxia.},
   Doi = {10.1007/s12311-023-01539-z},
   Key = {fds370417}
}
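
The AVE statistic above is essentially the mean Shannon entropy of a phoneme recognizer's posterior distributions, restricted to vowel tokens. A minimal sketch follows, assuming the network exposes per-token posterior probabilities; the function name and inputs are illustrative, not the authors' code.

    import numpy as np

    def average_vowel_entropy(posteriors, is_vowel):
        """Mean Shannon entropy of per-token phoneme posteriors over vowels.

        posteriors : (n_tokens, n_phonemes) array, rows summing to 1
        is_vowel   : (n_tokens,) boolean mask marking vowel tokens
        """
        p = np.clip(posteriors[is_vowel], 1e-12, 1.0)  # avoid log(0)
        entropy = -(p * np.log(p)).sum(axis=1)         # nats per token
        return entropy.mean()                          # AVE for the recording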

@article{fds375344,
   Author = {Franz, L and Viljoen, M and Askew, S and Brown, M and Dawson, G and Di
             Martino, JM and Sapiro, G and Sebolai, K and Seris, N and Shabalala, N and Stahmer, A and Turner, EL and de Vries, PJ},
   Title = {Autism Caregiver Coaching in Africa (ACACIA): Protocol for a
             type 1-hybrid effectiveness-implementation
             trial.},
   Journal = {PLoS One},
   Volume = {19},
   Number = {1},
   Pages = {e0291883},
   Year = {2024},
   url = {http://dx.doi.org/10.1371/journal.pone.0291883},
   Abstract = {BACKGROUND: While early autism intervention can
             significantly improve outcomes, gaps in implementation exist
             globally. These gaps are clearest in Africa, where forty
             percent of the world's children will live by 2050.
             Task-sharing early intervention to non-specialists is a key
             implementation strategy, given the lack of specialists in
             Africa. Naturalistic Developmental Behavioral Interventions
             (NDBI) are a class of early autism intervention that can be
             delivered by caregivers. As a foundational step to address
             the early autism intervention gap, we adapted a
             non-specialist delivered caregiver coaching NDBI for the
             South African context, and pre-piloted this cascaded
             task-sharing approach in an existing system of care.
             OBJECTIVES: First, we will test the effectiveness of the
             caregiver coaching NDBI compared to usual care. Second, we
             will describe coaching implementation factors within the
             Western Cape Department of Education in South Africa.
             METHODS: This is a type 1 effectiveness-implementation
             hybrid design; assessor-blinded, group randomized controlled
             trial. Participants include 150 autistic children (18-72
             months) and their caregivers who live in Cape Town, South
             Africa, and those involved in intervention implementation.
             Early Childhood Development practitioners, employed by the
             Department of Education, will deliver 12, one hour, coaching
             sessions to the intervention group. The control group will
             receive usual care. Distal co-primary outcomes include the
             Communication Domain Standard Score (Vineland Adaptive
             Behavior Scales, Third Edition) and the Language and
             Communication Developmental Quotient (Griffiths Scales of
             Child Development, Third Edition). Proximal secondary
              outcomes include caregiver strategies measured by the sum of
             five items from the Joint Engagement Rating Inventory. We
             will describe key implementation determinants. RESULTS:
             Participant enrolment started in April 2023. Estimated
             primary completion date is March 2027. CONCLUSION: The
             ACACIA trial will determine whether a cascaded task-sharing
             intervention delivered in an educational setting leads to
             meaningful improvements in communication abilities of
             autistic children, and identify implementation barriers and
             facilitators. TRIAL REGISTRATION: NCT05551728 in Clinical
             Trial Registry (https://clinicaltrials.gov).},
   Doi = {10.1371/journal.pone.0291883},
   Key = {fds375344}
}

@article{fds374279,
   Author = {Nazaret, A and Tonekaboni, S and Darnell, G and Ren, SY and Sapiro, G and Miller, AC},
   Title = {Modeling personalized heart rate response to exercise and
             environmental factors with wearables data},
   Journal = {npj Digital Medicine},
   Volume = {6},
   Number = {1},
   Year = {2023},
   Month = {December},
   url = {http://dx.doi.org/10.1038/s41746-023-00926-4},
   Abstract = {Heart rate (HR) response to workout intensity reflects
             fitness and cardiorespiratory health. Physiological models
             have been developed to describe such heart rate dynamics and
             characterize cardiorespiratory fitness. However, these
             models have been limited to small studies in controlled lab
             environments and are challenging to apply to noisy—but
             ubiquitous—data from wearables. We propose a hybrid
             approach that combines a physiological model with flexible
             neural network components to learn a personalized,
             multidimensional representation of fitness. The
             physiological model describes the evolution of heart rate
             during exercise using ordinary differential equations
             (ODEs). ODE parameters are dynamically derived via a neural
             network connecting personalized representations to external
             environmental factors, from area topography to weather and
             instantaneous workout intensity. Our approach efficiently
             fits the hybrid model to a large set of 270,707 workouts
             collected from wearables of 7465 users from the Apple Heart
             and Movement Study. The resulting model produces fitness
             representations that accurately predict full HR response to
             exercise intensity in future workouts, with a per-workout
             median error of 6.1 BPM [4.4–8.8 IQR]. We further
             demonstrate that the learned representations correlate with
             traditional metrics of cardiorespiratory fitness, such as
             VO2 max (explained variance 0.81 ± 0.003). Lastly, we
             illustrate how our model is naturally interpretable and
             explicitly describes the effects of environmental factors
             such as temperature and humidity on heart rate, e.g., high
             temperatures can increase heart rate by 10%. Combining
             physiological ODEs with flexible neural networks can yield
             interpretable, robust, and expressive models for health
             applications.},
   Doi = {10.1038/s41746-023-00926-4},
   Key = {fds374279}
}
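
A simple instance of the physiological component described above is a first-order ODE that relaxes heart rate toward an intensity-dependent demand; in the paper, the ODE parameters are produced by a neural network conditioned on the personalized representation and environment. The Euler-integration sketch below is illustrative only (demand_fn and tau stand in for those learned components).

    import numpy as np

    def simulate_hr(hr0, intensity, demand_fn, tau, dt=1.0):
        """Integrate dHR/dt = (demand(intensity) - HR) / tau with Euler steps.

        hr0       : heart rate at workout start (BPM)
        intensity : (T,) instantaneous workout intensity, one value per dt
        demand_fn : maps intensity to steady-state HR demand (a toy function
                    here; a learned network in the hybrid model)
        tau       : response time constant, in the same units as dt
        """
        hr = np.empty(len(intensity))
        hr[0] = hr0
        for t in range(1, len(intensity)):
            hr[t] = hr[t - 1] + dt * (demand_fn(intensity[t - 1]) - hr[t - 1]) / tau
        return hr

    # toy usage: constant 70% intensity for 5 minutes, linear demand curve
    hr = simulate_hr(60.0, np.full(300, 0.7), lambda i: 60 + 100 * i, tau=40.0)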

@article{fds373966,
   Author = {Schlesinger, O and Kundu, R and Goetz, S and Sapiro, G and Peterchev,
              AV and Di Martino, JM},
   Title = {Automatic Neurocranial Landmarks Detection from Visible
             Facial Landmarks Leveraging 3D Head Priors.},
   Journal = {Clin Image Based Proced Fairness AI Med Imaging Ethical
             Philos Issues Med Imaging (2023)},
   Volume = {14242},
   Pages = {12-20},
   Year = {2023},
   Month = {October},
   ISBN = {9783031452482},
   url = {http://dx.doi.org/10.1007/978-3-031-45249-9_2},
   Abstract = {The localization and tracking of neurocranial landmarks is
             essential in modern medical procedures, e.g., transcranial
             magnetic stimulation (TMS). However, state-of-the-art
             treatments still rely on the manual identification of head
             targets and require setting retroreflective markers for
             tracking. This limits the applicability and scalability of
             TMS approaches, making them time-consuming, dependent on
             expensive hardware, and prone to errors when retroreflective
             markers drift from their initial position. To overcome these
             limitations, we propose a scalable method capable of
             inferring the position of points of interest on the scalp,
             e.g., the International 10-20 System's neurocranial
             landmarks. In contrast with existing approaches, our method
             does not require human intervention or markers; head
             landmarks are estimated leveraging visible facial landmarks,
             optional head size measurements, and statistical head model
             priors. We validate the proposed approach on ground truth
             data from 1,150 subjects, for which facial 3D and head
             information is available; our technique achieves a
             localization RMSE of 2.56 mm on average, which is of the
             same order as reported by high-end techniques in TMS. Our
             implementation is available at https://github.com/odedsc/ANLD.},
   Doi = {10.1007/978-3-031-45249-9_2},
   Key = {fds373966}
}

@article{fds373329,
   Author = {Perochon, S and Di Martino, JM and Carpenter, KLH and Compton, S and Davis, N and Eichner, B and Espinosa, S and Franz, L and Krishnappa
             Babu, PR and Sapiro, G and Dawson, G},
   Title = {Early detection of autism using digital behavioral
             phenotyping.},
   Journal = {Nat Med},
   Volume = {29},
   Number = {10},
   Pages = {2489-2497},
   Year = {2023},
   Month = {October},
   url = {http://dx.doi.org/10.1038/s41591-023-02574-3},
   Abstract = {Early detection of autism, a neurodevelopmental condition
             associated with challenges in social communication, ensures
             timely access to intervention. Autism screening
             questionnaires have been shown to have lower accuracy when
             used in real-world settings, such as primary care, as
             compared to research studies, particularly for children of
             color and girls. Here we report findings from a multiclinic,
             prospective study assessing the accuracy of an autism
             screening digital application (app) administered during a
             pediatric well-child visit to 475 (17-36 months old)
             children (269 boys and 206 girls), of which 49 were
             diagnosed with autism and 98 were diagnosed with
             developmental delay without autism. The app displayed
             stimuli that elicited behavioral signs of autism, quantified
             using computer vision and machine learning. An algorithm
             combining multiple digital phenotypes showed high diagnostic
             accuracy with the area under the receiver operating
             characteristic curve = 0.90, sensitivity = 87.8%,
             specificity = 80.8%, negative predictive
             value = 97.8% and positive predictive value = 40.6%.
             The algorithm had similar sensitivity performance across
             subgroups as defined by sex, race and ethnicity. These
             results demonstrate the potential for digital phenotyping to
             provide an objective, scalable approach to autism screening
             in real-world settings. Moreover, combining results from
             digital phenotyping and caregiver questionnaires may
             increase autism screening accuracy and help reduce
             disparities in access to diagnosis and intervention.},
   Doi = {10.1038/s41591-023-02574-3},
   Key = {fds373329}
}
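
The reported operating characteristics (sensitivity, specificity, NPV, PPV, area under the ROC curve) all follow from the confusion matrix at a chosen threshold. A short sketch of how such numbers are computed; variable names are hypothetical.

    import numpy as np
    from sklearn.metrics import roc_auc_score

    def screening_metrics(y_true, scores, threshold):
        """Summary statistics for a binary screener at one operating point."""
        y_true = np.asarray(y_true).astype(bool)
        y_pred = np.asarray(scores) >= threshold
        tp = np.sum(y_pred & y_true);   fn = np.sum(~y_pred & y_true)
        tn = np.sum(~y_pred & ~y_true); fp = np.sum(y_pred & ~y_true)
        return {
            "sensitivity": tp / (tp + fn),
            "specificity": tn / (tn + fp),
            "ppv": tp / (tp + fp),          # depends strongly on prevalence
            "npv": tn / (tn + fn),
            "auc": roc_auc_score(y_true, scores),
        }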

@article{fds373965,
   Author = {Nazaret, A and Sapiro, G},
   Title = {A large-scale observational study of the causal effects of a
             behavioral health nudge.},
   Journal = {Science advances},
   Volume = {9},
   Number = {38},
   Pages = {eadi1752},
   Year = {2023},
   Month = {September},
   url = {http://dx.doi.org/10.1126/sciadv.adi1752},
   Abstract = {Nudges are interventions promoting healthy behavior without
             forbidding options or substantial incentives; the Apple
             Watch, for example, encourages users to stand by delivering
             a notification if they have been sitting for the first 50
             minutes of an hour. On the basis of 76 billion minutes of
             observational standing data from 160,000 subjects in the
             public Apple Heart and Movement Study, we estimate the
             causal effect of this notification using a regression
             discontinuity design for time series data with time-varying
             treatment. We show that the nudge increases the probability
             of standing by up to 43.9% and remains effective with time.
             The nudge's effectiveness increases with age and is
             independent of gender. Closing Apple Watch Activity Rings, a
             visualization of participants' daily progress in Move,
             Exercise, and Stand, further increases the nudge's impact.
             This work demonstrates the effectiveness of behavioral
             health interventions and introduces tools for investigating
             their causal effect from large-scale observations.},
   Doi = {10.1126/sciadv.adi1752},
   Key = {fds373965}
}
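
The causal estimate above rests on a regression discontinuity: subjects just past the notification threshold are compared with those just short of it. The paper develops an RDD for time series with time-varying treatment; the sketch below shows only the textbook sharp-RDD version (local linear fits on each side of the cutoff, names hypothetical).

    import numpy as np

    def rdd_effect(running, outcome, cutoff, bandwidth):
        """Gap between local linear fits evaluated at the cutoff.

        running : forcing variable, e.g., consecutive sitting minutes
        outcome : 0/1 indicator, e.g., standing in the following window
        """
        left = (running >= cutoff - bandwidth) & (running < cutoff)
        right = (running >= cutoff) & (running < cutoff + bandwidth)
        fit_l = np.polyfit(running[left], outcome[left], 1)   # slope, intercept
        fit_r = np.polyfit(running[right], outcome[right], 1)
        return np.polyval(fit_r, cutoff) - np.polyval(fit_l, cutoff)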

@article{fds371669,
   Author = {Chen, J and Engelhard, M and Henao, R and Berchuck, S and Eichner, B and Perrin, EM and Sapiro, G and Dawson, G},
   Title = {Enhancing early autism prediction based on electronic
             records using clinical narratives.},
   Journal = {J Biomed Inform},
   Volume = {144},
   Pages = {104390},
   Year = {2023},
   Month = {August},
   url = {http://dx.doi.org/10.1016/j.jbi.2023.104390},
   Abstract = {Recent work has shown that predictive models can be applied
             to structured electronic health record (EHR) data to
             stratify autism likelihood from an early age (<1 year).
             Integrating clinical narratives (or notes) with structured
             data has been shown to improve prediction performance in
             other clinical applications, but the added predictive value
             of this information in early autism prediction has not yet
             been explored. In this study, we aimed to enhance the
             performance of early autism prediction by using both
             structured EHR data and clinical narratives. We built models
             based on structured data and clinical narratives separately,
             and then an ensemble model that integrated both sources of
             data. We assessed the predictive value of these models from
             Duke University Health System over a 14-year span to
             evaluate ensemble models predicting later autism diagnosis
             (by age 4 years) from data collected from ages 30 to
              360 days. Our sample included 11,750 children followed beyond
              age 3 years (385 meeting autism diagnostic criteria). The
             ensemble model for autism prediction showed superior
             performance and at age 30 days achieved 46.8% sensitivity
             (95% confidence interval, CI: 22.0%, 52.9%), 28.0% positive
             predictive value (PPV) at high (90%) specificity (CI: 2.0%,
             33.1%), and AUC4 (with at least 4-year follow-up for
             controls) reaching 0.769 (CI: 0.715, 0.811). Prediction by
             360 days achieved 44.5% sensitivity (CI: 23.6%, 62.9%),
             and 13.7% PPV at high (90%) specificity (CI: 9.6%, 18.9%),
             and AUC4 reaching 0.797 (CI: 0.746, 0.840). Results show
             that incorporating clinical narratives in early autism
             prediction achieved promising accuracy by age 30 days,
             outperforming models based on structured data only.
             Furthermore, findings suggest that additional features
             learned from clinician narratives might be hypothesis
             generating for understanding early development in
             autism.},
   Doi = {10.1016/j.jbi.2023.104390},
   Key = {fds371669}
}
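
The abstract does not specify how the structured-data and clinical-narrative models are combined; a common and simple choice is stacking, in which a meta-learner is fit on the base models' out-of-fold predicted probabilities. A sketch under that assumption (all names hypothetical):

    import numpy as np
    from sklearn.linear_model import LogisticRegression

    def fit_stacking_ensemble(p_structured, p_notes, y):
        """Meta-learner over out-of-fold probabilities from two base models."""
        X = np.column_stack([p_structured, p_notes])
        return LogisticRegression().fit(X, y)

    # ensemble risk for new children:
    # meta.predict_proba(np.column_stack([p_s_new, p_n_new]))[:, 1]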

@article{fds371278,
   Author = {Coffman, M and Di Martino, JM and Aiello, R and Carpenter, KLH and Chang, Z and Compton, S and Eichner, B and Espinosa, S and Flowers, J and Franz, L and Perochon, S and Krishnappa Babu, PR and Sapiro, G and Dawson, G},
   Title = {Relationship between quantitative digital behavioral
             features and clinical profiles in young autistic
             children.},
   Journal = {Autism Res},
   Volume = {16},
   Number = {7},
   Pages = {1360-1374},
   Year = {2023},
   Month = {July},
   url = {http://dx.doi.org/10.1002/aur.2955},
   Abstract = {Early behavioral markers for autism include differences in
             social attention and orienting in response to one's name
             when called, and differences in body movements and motor
             abilities. More efficient, scalable, objective, and reliable
             measures of these behaviors could improve early screening
             for autism. This study evaluated whether objective and
             quantitative measures of autism-related behaviors elicited
             from an app (SenseToKnow) administered on a smartphone or
             tablet and measured via computer vision analysis (CVA) are
             correlated with standardized caregiver-report and clinician
             administered measures of autism-related behaviors and
             cognitive, language, and motor abilities. This is an
             essential step in establishing the concurrent validity of a
             digital phenotyping approach. In a sample of 485 toddlers,
             43 of whom were diagnosed with autism, we found that
             CVA-based gaze variables related to social attention were
             associated with the level of autism-related behaviors. Two
             language-related behaviors measured via the app, attention
             to people during a conversation and responding to one's name
             being called, were associated with children's language
             skills. Finally, performance during a bubble popping game
             was associated with fine motor skills. These findings
             provide initial support for the concurrent validity of the
             SenseToKnow app and its potential utility in identifying
             clinical profiles associated with autism. Future research is
             needed to determine whether the app can be used as an autism
             screening tool, can reliably stratify autism-related
             behaviors, and measure changes in autism-related behaviors
             over time.},
   Doi = {10.1002/aur.2955},
   Key = {fds371278}
}

@article{fds370641,
   Author = {Krishnappa Babu, PR and Aikat, V and Di Martino, JM and Chang, Z and Perochon, S and Espinosa, S and Aiello, R and Carpenter, KLH and Compton, S and Davis, N and Eichner, B and Flowers, J and Franz, L and Dawson, G and Sapiro, G},
   Title = {Blink rate and facial orientation reveal distinctive
             patterns of attentional engagement in autistic toddlers: a
             digital phenotyping approach.},
   Journal = {Sci Rep},
   Volume = {13},
   Number = {1},
   Pages = {7158},
   Year = {2023},
   Month = {May},
   url = {http://dx.doi.org/10.1038/s41598-023-34293-7},
   Abstract = {Differences in social attention are well-documented in
             autistic individuals, representing one of the earliest signs
             of autism. Spontaneous blink rate has been used to index
             attentional engagement, with lower blink rates reflecting
             increased engagement. We evaluated novel methods using
             computer vision analysis (CVA) for automatically quantifying
             patterns of attentional engagement in young autistic
             children, based on facial orientation and blink rate, which
             were captured via mobile devices. Participants were 474
             children (17-36 months old), 43 of whom were diagnosed with
             autism. Movies containing social or nonsocial content were
             presented via an iPad app, and simultaneously, the device's
             camera recorded the children's behavior while they watched
             the movies. CVA was used to extract the duration of time the
             child oriented towards the screen and their blink rate as
             indices of attentional engagement. Overall, autistic
             children spent less time facing the screen and had a higher
             mean blink rate compared to neurotypical children.
             Neurotypical children faced the screen more often and
             blinked at a lower rate during the social movies compared to
             the nonsocial movies. In contrast, autistic children faced
             the screen less often during social movies than during
             nonsocial movies and showed no differential blink rate to
             social versus nonsocial movies.},
   Doi = {10.1038/s41598-023-34293-7},
   Key = {fds370641}
}
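
Given per-frame computer-vision output, the two engagement indices described above reduce to simple aggregates: the fraction of time the face is oriented toward the screen, and blinks per minute while facing it. A sketch with hypothetical inputs:

    import numpy as np

    def engagement_indices(facing, blink_onset, fps):
        """facing, blink_onset : (n_frames,) booleans from CVA; fps : frame rate."""
        seconds_facing = facing.sum() / fps
        facing_fraction = facing.mean()              # share of movie spent facing
        blinks_per_min = 60.0 * blink_onset[facing].sum() / seconds_facing
        return facing_fraction, blinks_per_min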

@article{fds370385,
   Author = {Isaev, DY and Sabatos-DeVito, M and Di Martino, JM and Carpenter, K and Aiello, R and Compton, S and Davis, N and Franz, L and Sullivan, C and Dawson, G and Sapiro, G},
   Title = {Computer Vision Analysis of Caregiver-Child Interactions in
             Children with Neurodevelopmental Disorders: A Preliminary
             Report.},
   Journal = {J Autism Dev Disord},
   Year = {2023},
   Month = {April},
   url = {http://dx.doi.org/10.1007/s10803-023-05973-0},
   Abstract = {We report preliminary results of computer vision analysis of
             caregiver-child interactions during free play with children
             diagnosed with autism (N = 29, 41-91 months),
             attention-deficit/hyperactivity disorder (ADHD, N = 22,
             48-100 months), or combined autism + ADHD (N = 20,
             56-98 months), and neurotypical children (NT, N = 7,
             55-95 months). We conducted micro-analytic analysis of
             'reaching to a toy,' as a proxy for initiating or responding
             to a toy play bout. Dyadic analysis revealed two clusters of
             interaction patterns, which differed in frequency of
             'reaching to a toy' and caregivers' contingent responding to
             the child's reach for a toy by also reaching for a toy.
             Children in dyads with higher caregiver responsiveness had
             less developed language, communication, and socialization
             skills. Clusters were not associated with diagnostic groups.
             These results hold promise for automated methods of
             characterizing caregiver responsiveness in dyadic
             interactions for assessment and outcome monitoring in
             clinical trials.},
   Doi = {10.1007/s10803-023-05973-0},
   Key = {fds370385}
}

@article{fds369364,
   Author = {Perochon, S and Matias Di Martino, J and Carpenter, KLH and Compton,
             S and Davis, N and Espinosa, S and Franz, L and Rieder, AD and Sullivan, C and Sapiro, G and Dawson, G},
   Title = {A tablet-based game for the assessment of visual motor
             skills in autistic children.},
   Journal = {NPJ Digit Med},
   Volume = {6},
   Number = {1},
   Pages = {17},
   Year = {2023},
   Month = {February},
   url = {http://dx.doi.org/10.1038/s41746-023-00762-6},
   Abstract = {Increasing evidence suggests that early motor impairments
             are a common feature of autism. Thus, scalable, quantitative
             methods for measuring motor behavior in young autistic
             children are needed. This work presents an engaging and
             scalable assessment of visual-motor abilities based on a
             bubble-popping game administered on a tablet. Participants
             are 233 children ranging from 1.5 to 10 years of age (147
             neurotypical children and 86 children diagnosed with autism
             spectrum disorder [autistic], of which 32 are also diagnosed
             with co-occurring attention-deficit/hyperactivity disorder
             [autistic+ADHD]). Computer vision analyses are used to
             extract several game-based touch features, which are
             compared across autistic, autistic+ADHD, and neurotypical
             participants. Results show that younger (1.5-3 years)
              autistic children pop the bubbles at a lower rate and touch
              the bubbles' centers less accurately than neurotypical
              children. When they pop a bubble,
             their finger lingers for a longer period, and they show more
             variability in their performance. In older children
             (3-10-years), consistent with previous research, the
             presence of co-occurring ADHD is associated with greater
             motor impairment, reflected in lower accuracy and more
             variable performance. Several motor features are correlated
             with standardized assessments of fine motor and cognitive
             abilities, as evaluated by an independent clinical
             assessment. These results highlight the potential of
             touch-based games as an efficient and scalable approach for
             assessing children's visual-motor skills, which can be part
             of a broader screening tool for identifying early signs
             associated with autism.},
   Doi = {10.1038/s41746-023-00762-6},
   Key = {fds369364}
}
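
The game-based touch features named above (popping rate, accuracy relative to the bubble's center, lingering time, variability) can be computed directly from logged touch events. A sketch; the feature names and definitions are illustrative, not the study's exact ones:

    import numpy as np

    def touch_features(touch_xy, bubble_xy, touch_duration_s, n_pops, session_s):
        """touch_xy, bubble_xy : (n_touches, 2) coordinates; durations in s."""
        err = np.linalg.norm(touch_xy - bubble_xy, axis=1)  # offset from center
        return {
            "pop_rate": n_pops / session_s,       # bubbles popped per second
            "accuracy_px": err.mean(),            # mean touch offset
            "variability_px": err.std(),          # performance variability
            "linger_s": touch_duration_s.mean(),  # finger dwell after contact
        }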

@article{fds369365,
   Author = {Engelhard, MM and Henao, R and Berchuck, SI and Chen, J and Eichner, B and Herkert, D and Kollins, SH and Olson, A and Perrin, EM and Rogers, U and Sullivan, C and Zhu, Y and Sapiro, G and Dawson, G},
   Title = {Predictive Value of Early Autism Detection Models Based on
             Electronic Health Record Data Collected Before Age 1
             Year.},
   Journal = {JAMA Netw Open},
   Volume = {6},
   Number = {2},
   Pages = {e2254303},
   Year = {2023},
   Month = {February},
   url = {http://dx.doi.org/10.1001/jamanetworkopen.2022.54303},
   Abstract = {IMPORTANCE: Autism detection early in childhood is critical
             to ensure that autistic children and their families have
             access to early behavioral support. Early correlates of
             autism documented in electronic health records (EHRs) during
             routine care could allow passive, predictive model-based
             monitoring to improve the accuracy of early detection.
             OBJECTIVE: To quantify the predictive value of early autism
             detection models based on EHR data collected before age 1
             year. DESIGN, SETTING, AND PARTICIPANTS: This retrospective
             diagnostic study used EHR data from children seen within the
             Duke University Health System before age 30 days between
             January 2006 and December 2020. These data were used to
             train and evaluate L2-regularized Cox proportional hazards
             models predicting later autism diagnosis based on data
             collected from birth up to the time of prediction (ages
             30-360 days). Statistical analyses were performed between
             August 1, 2020, and April 1, 2022. MAIN OUTCOMES AND
             MEASURES: Prediction performance was quantified in terms of
             sensitivity, specificity, and positive predictive value
             (PPV) at clinically relevant model operating thresholds.
              RESULTS: Data from 45,080 children, including 924 (1.5%)
             meeting autism criteria, were included in this study.
             Model-based autism detection at age 30 days achieved 45.5%
             sensitivity and 23.0% PPV at 90.0% specificity. Detection by
             age 360 days achieved 59.8% sensitivity and 17.6% PPV at
             81.5% specificity and 38.8% sensitivity and 31.0% PPV at
             94.3% specificity. CONCLUSIONS AND RELEVANCE: In this
             diagnostic study of an autism screening test, EHR-based
             autism detection achieved clinically meaningful accuracy by
             age 30 days, improving by age 1 year. This automated
             approach could be integrated with caregiver surveys to
             improve the accuracy of early autism screening.},
   Doi = {10.1001/jamanetworkopen.2022.54303},
   Key = {fds369365}
}
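
An L2-regularized Cox proportional hazards model of the kind described can be fit with the lifelines package; the sketch below assumes a one-row-per-child table with follow-up time and an autism-diagnosis event flag (file, column names, and penalty strength are illustrative).

    import pandas as pd
    from lifelines import CoxPHFitter

    df = pd.read_csv("ehr_features_by_360d.csv")    # hypothetical feature table

    cph = CoxPHFitter(penalizer=1.0, l1_ratio=0.0)  # l1_ratio=0 -> pure L2 penalty
    cph.fit(df, duration_col="followup_days", event_col="autism_dx")
    risk = cph.predict_partial_hazard(df)           # rank children, then threshold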

@article{fds365564,
   Author = {Krishnappa Babu, PR and Di Martino, JM and Chang, Z and Perochon, S and Aiello, R and Carpenter, KLH and Compton, S and Davis, N and Franz, L and Espinosa, S and Flowers, J and Dawson, G and Sapiro,
             G},
   Title = {Complexity analysis of head movements in autistic
             toddlers.},
   Journal = {J Child Psychol Psychiatry},
   Volume = {64},
   Number = {1},
   Pages = {156-166},
   Year = {2023},
   Month = {January},
   url = {http://dx.doi.org/10.1111/jcpp.13681},
   Abstract = {BACKGROUND: Early differences in sensorimotor functioning
             have been documented in young autistic children and infants
             who are later diagnosed with autism. Previous research has
             demonstrated that autistic toddlers exhibit more frequent
             head movement when viewing dynamic audiovisual stimuli,
             compared to neurotypical toddlers. To further explore this
             behavioral characteristic, in this study, computer vision
             (CV) analysis was used to measure several aspects of head
             movement dynamics of autistic and neurotypical toddlers
             while they watched a set of brief movies with social and
             nonsocial content presented on a tablet. METHODS: Data were
             collected from 457 toddlers, 17-36 months old, during
             their well-child visit to four pediatric primary care
             clinics. Forty-one toddlers were subsequently diagnosed with
             autism. An application (app) displayed several brief movies
             on a tablet, and the toddlers watched these movies while
             sitting on their caregiver's lap. The front-facing camera in
             the tablet recorded the toddlers' behavioral responses. CV
             was used to measure the participants' head movement rate,
             movement acceleration, and complexity using multiscale
             entropy. RESULTS: Autistic toddlers exhibited significantly
             higher rate, acceleration, and complexity in their head
             movements while watching the movies compared to neurotypical
             toddlers, regardless of the type of movie content (social
             vs. nonsocial). The combined features of head movement
             acceleration and complexity reliably distinguished the
             autistic and neurotypical toddlers. CONCLUSIONS: Autistic
             toddlers exhibit differences in their head movement dynamics
             when viewing audiovisual stimuli. Higher complexity of their
             head movements suggests that their movements were less
             predictable and less stable compared to neurotypical
             toddlers. CV offers a scalable means of detecting subtle
             differences in head movement dynamics, which may be helpful
             in identifying early behaviors associated with autism and
             providing insight into the nature of sensorimotor
             differences associated with autism.},
   Doi = {10.1111/jcpp.13681},
   Key = {fds365564}
}
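
Multiscale entropy, used above to quantify head-movement complexity, coarse-grains a time series at increasing scales and computes sample entropy at each scale. A compact sketch of the standard algorithm (not the study's code):

    import numpy as np

    def sample_entropy(x, m=2, r=0.2):
        """SampEn = -ln(A/B): A, B = matching template pairs of length m+1, m."""
        x = np.asarray(x, float)
        tol = r * x.std()
        def matches(length):
            t = np.lib.stride_tricks.sliding_window_view(x, length)
            return sum(np.sum(np.abs(t[i + 1:] - t[i]).max(axis=1) <= tol)
                       for i in range(len(t) - 1))
        b, a = matches(m), matches(m + 1)
        return -np.log(a / b) if a > 0 and b > 0 else np.inf

    def multiscale_entropy(x, scales=range(1, 11), m=2, r=0.2):
        """Sample entropy of non-overlapping-mean coarse-grainings of x."""
        x = np.asarray(x, float)
        return [sample_entropy(x[: len(x) // s * s].reshape(-1, s).mean(axis=1),
                               m, r)
                for s in scales]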

@article{fds371718,
   Author = {Solomon, O and Patriat, R and Braun, H and Palnitkar, TE and Moeller, S and Auerbach, EJ and Ugurbil, K and Sapiro, G and Harel,
             N},
   Title = {Motion robust magnetic resonance imaging via efficient
             Fourier aggregation.},
   Journal = {Medical image analysis},
   Volume = {83},
   Pages = {102638},
   Year = {2023},
   Month = {January},
   url = {http://dx.doi.org/10.1016/j.media.2022.102638},
   Abstract = {We present a method for suppressing motion artifacts in
             anatomical magnetic resonance acquisitions. Our proposed
             technique, termed MOTOR-MRI, can recover and salvage images
             which are otherwise heavily corrupted by motion induced
             artifacts and blur which renders them unusable. Contrary to
             other techniques, MOTOR-MRI operates on the reconstructed
             images and not on k-space data. It relies on breaking the
             standard acquisition protocol into several shorter ones
             (while maintaining the same total acquisition time) and
             subsequent efficient aggregation in Fourier space of locally
             sharp and consistent information among them, producing a
             sharp and motion mitigated image. We demonstrate the
             efficacy of the technique on T<sub>2</sub>-weighted turbo
             spin echo magnetic resonance brain scans with severe motion
             corruption from both 3 T and 7 T scanners and show
             significant qualitative and quantitative improvement in
             image quality. MOTOR-MRI can operate independently, or in
             conjunction with additional motion correction
             methods.},
   Doi = {10.1016/j.media.2022.102638},
   Key = {fds371718}
}
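
MOTOR-MRI operates on reconstructed images from several shorter acquisitions and keeps the locally sharp and mutually consistent Fourier content. The paper's aggregation rule is more involved; the sketch below only illustrates the flavor, down-weighting k-space coefficients that deviate from the across-repetition median.

    import numpy as np

    def aggregate_kspace(imgs, sigma=1.0):
        """imgs : (n_reps, H, W) reconstructions of the same slice."""
        F = np.fft.fft2(imgs, axes=(-2, -1))
        med = np.median(F.real, axis=0) + 1j * np.median(F.imag, axis=0)
        w = np.exp(-np.abs(F - med) / (sigma * np.abs(med) + 1e-8))  # consistency
        F_agg = (w * F).sum(axis=0) / w.sum(axis=0)  # weighted Fourier average
        return np.abs(np.fft.ifft2(F_agg))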

@article{fds359483,
   Author = {Babu, PRK and Di Martino, JM and Chang, Z and Perochon, S and Carpenter,
             KLH and Compton, S and Espinosa, S and Dawson, G and Sapiro,
             G},
   Title = {Exploring Complexity of Facial Dynamics in Autism Spectrum
             Disorder.},
   Journal = {IEEE Trans Affect Comput},
   Volume = {14},
   Number = {2},
   Pages = {919-930},
   Year = {2023},
   url = {http://dx.doi.org/10.1109/taffc.2021.3113876},
   Abstract = {Atypical facial expression is one of the early symptoms of
             autism spectrum disorder (ASD) characterized by reduced
             regularity and lack of coordination of facial movements.
             Automatic quantification of these behaviors can offer novel
             biomarkers for screening, diagnosis, and treatment
             monitoring of ASD. In this work, 40 toddlers with ASD and
             396 typically developing toddlers were shown
             developmentally-appropriate and engaging movies presented on
             a smart tablet during a well-child pediatric visit. The
             movies consisted of social and non-social dynamic scenes
             designed to evoke certain behavioral and affective
             responses. The front-facing camera of the tablet was used to
             capture the toddlers' face. Facial landmarks' dynamics were
             then automatically computed using computer vision
             algorithms. Subsequently, the complexity of the landmarks'
             dynamics was estimated for the eyebrows and mouth regions
             using multiscale entropy. Compared to typically developing
             toddlers, toddlers with ASD showed higher complexity (i.e.,
             less predictability) in these landmarks' dynamics. This
             complexity in facial dynamics contained novel information
             not captured by traditional facial affect analyses. These
             results suggest that computer vision analysis of facial
             landmark movements is a promising approach for detecting and
             quantifying early behavioral symptoms associated with
             ASD.},
   Doi = {10.1109/taffc.2021.3113876},
   Key = {fds359483}
}

@article{fds361207,
   Author = {Major, S and Isaev, D and Grapel, J and Calnan, T and Tenenbaum, E and Carpenter, K and Franz, L and Howard, J and Vermeer, S and Sapiro, G and Murias, M and Dawson, G},
   Title = {Shorter average look durations to dynamic social stimuli are
             associated with higher levels of autism symptoms in young
             autistic children.},
   Journal = {Autism},
   Volume = {26},
   Number = {6},
   Pages = {1451-1459},
   Year = {2022},
   Month = {August},
   url = {http://dx.doi.org/10.1177/13623613211056427},
   Abstract = {Many studies of autism look at the differences in how
             autistic research participants look at certain types of
             images. These studies often focus on where research
             participants are looking within the image, but that does not
             tell us everything about how much they are paying attention.
             It could be useful to know more about how well autistic
             research participants can focus on an image with people in
             it, because those who can look at images of people for
             longer duration without stopping may be able to easily learn
             other skills that help them to interact with people. We
             measured how long autistic research participants watched the
             video without breaking their attention. The video sometimes
             had a person speaking, and at other times had toys moving
             and making sounds. We measured the typical amount of time
             autistic research participants could look at the video
             before they looked away. We found that research participants
             with more severe autism tended to look at the video for
             shorter amounts of time. The ability to focus without
             stopping may be related to social skills in autistic
             people.},
   Doi = {10.1177/13623613211056427},
   Key = {fds361207}
}

@article{fds364075,
   Author = {Papadaki, A and Martinez, N and Bertran, M and Sapiro, G and Rodrigues,
             M},
   Title = {Minimax Demographic Group Fairness in Federated
             Learning},
   Journal = {ACM International Conference Proceeding Series},
   Pages = {142-159},
   Year = {2022},
   Month = {June},
   ISBN = {9781450393522},
   url = {http://dx.doi.org/10.1145/3531146.3533081},
   Abstract = {Federated learning is an increasingly popular paradigm that
             enables a large number of entities to collaboratively learn
             better models. In this work, we study minimax group fairness
             in federated learning scenarios where different
             participating entities may only have access to a subset of
             the population groups during the training phase. We formally
             analyze how our proposed group fairness objective differs
             from existing federated learning fairness criteria that
             impose similar performance across participants instead of
             demographic groups. We provide an optimization algorithm -
             FedMinMax - for solving the proposed problem that provably
             enjoys the performance guarantees of centralized learning
             algorithms. We experimentally compare the proposed approach
             against other state-of-the-art methods in terms of group
             fairness in various federated learning setups, showing that
             our approach exhibits competitive or superior
             performance.},
   Doi = {10.1145/3531146.3533081},
   Key = {fds364075}
}
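
Minimax group fairness seeks parameters minimizing the worst demographic group's risk, typically via alternating updates: a model step on the group-weighted risk and an ascent step on the group-weight simplex. The fragment below shows only a multiplicative-weights update of that kind, not the full FedMinMax algorithm.

    import numpy as np

    def update_group_weights(lam, group_losses, eta=0.1):
        """Exponentiated-gradient ascent on the simplex: worse-off groups
        gain weight, steering the learner toward the minimax solution."""
        lam = lam * np.exp(eta * np.asarray(group_losses))
        return lam / lam.sum()

    # per round: compute per-group losses, update lam as above, then take a
    # model gradient step on sum(lam[g] * loss_g).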

@article{fds362056,
   Author = {Chaudhary, UN and Kelly, CN and Wesorick, BR and Reese, CM and Gall, K and Adams, SB and Sapiro, G and Di Martino, JM},
   Title = {Computational and image processing methods for analysis and
             automation of anatomical alignment and joint spacing in
             reconstructive surgery.},
   Journal = {Int J Comput Assist Radiol Surg},
   Volume = {17},
   Number = {3},
   Pages = {541-551},
   Year = {2022},
   Month = {March},
   url = {http://dx.doi.org/10.1007/s11548-021-02548-1},
   Abstract = {PURPOSE: Reconstructive surgeries to treat a number of
             musculoskeletal conditions, from arthritis to severe trauma,
             involve implant placement and reconstructive planning
             components. Anatomically matched 3D-printed implants are
             becoming increasingly patient-specific; however, the
             preoperative planning and design process requires several
             hours of manual effort from highly trained engineers and
             clinicians. Our work mitigates this problem by proposing
             algorithms for the automatic re-alignment of unhealthy
             anatomies, leading to more efficient, affordable, and
             scalable treatment solutions. METHODS: Our solution combines
             global alignment techniques such as iterative closest points
             with novel joint space refinement algorithms. The latter is
             achieved by a low-dimensional characterization of the joint
             space, computed from the distribution of the distance
             between adjacent points in a joint. RESULTS: Experimental
             validation is presented on real clinical data from human
             subjects. Compared with ground truth healthy anatomies, our
             algorithms can reduce misalignment errors by 22% in
             translation and 19% in rotation for the full foot-and-ankle
             and 37% in translation and 39% in rotation for the hindfoot
             only, achieving a performance comparable to expert
             technicians. CONCLUSION: Our methods and histogram-based
             metric allow for automatic and unsupervised alignment of
             anatomies along with techniques for global alignment of
             complex arrangements such as the foot-and-ankle system, a
             major step toward a fully automated and data-driven
             re-positioning, designing, and diagnosing
             tool.},
   Doi = {10.1007/s11548-021-02548-1},
   Key = {fds362056}
}
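
The low-dimensional joint-space characterization described above is built from the distribution of distances between adjacent points across the joint. A sketch of one such descriptor (nearest-neighbor distance histogram; parameters illustrative):

    import numpy as np
    from scipy.spatial import cKDTree

    def joint_space_histogram(bone_a, bone_b, bins=32, max_gap_mm=10.0):
        """bone_a, bone_b : (N, 3) surface point clouds in mm."""
        gaps = cKDTree(bone_b).query(bone_a)[0]  # distance to nearest point
        hist, _ = np.histogram(gaps, bins=bins, range=(0.0, max_gap_mm),
                               density=True)
        return hist  # score candidate alignments against healthy references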

@article{fds359919,
   Author = {Kim, YK and Di Martino, JM and Nicholas, J and Rivera-Cancel, A and Wildes, JE and Marcus, MD and Sapiro, G and Zucker,
             N},
   Title = {Parent strategies for expanding food variety: Reflections of
             19,239 adults with symptoms of Avoidant/Restrictive Food
             Intake Disorder.},
   Journal = {Int J Eat Disord},
   Volume = {55},
   Number = {1},
   Pages = {108-119},
   Year = {2022},
   Month = {January},
   url = {http://dx.doi.org/10.1002/eat.23639},
   Abstract = {OBJECTIVE: To characterize helpful parent feeding strategies
             using reflections on childhood eating experiences of adults
             with symptoms of Avoidant/Restrictive Food Intake Disorder
             (ARFID). METHOD: We explored a unique text-based dataset
             gathered from a population of N = 19,239 self-identified
             adult "picky eaters." The sample included adults with
             symptoms of ARFID as evidenced by marked interference in
             psychosocial functioning, weight loss/sustained low weight,
             and/or nutritional deficiency (likely ARFID), and non-ARFID
             participants. We leveraged state-of-the-art natural language
             processing (NLP) methods to classify feeding strategies that
             were perceived as helpful or not helpful. The best
             classifiers that distinguished helpful approaches were
             further analyzed using qualitative coding according to a
             grounded theory approach. RESULTS: NLP reliably and
             accurately classified the perceived helpfulness of
             caregivers' feeding strategies (82%) and provided
             information about features of helpful parent strategies
             using recollections of adults with varying degrees of food
             avoidance. Strategies perceived as forceful were regarded as
             not helpful. Positive and encouraging strategies were
             perceived as helpful in improving attitudes toward food and
             minimizing social discomfort around eating. Although food
             variety improved, adults still struggled with a degree of
             avoidance/restriction. DISCUSSION: Adults perceived that
             positive parent feeding strategies were helpful even though
             they continued to experience some degree of food avoidance.
             Creating a positive emotional context surrounding food and
             eating with others may help to eliminate psychosocial
             impairment and increase food approach in those with severe
             food avoidance. Nevertheless, additional tools to optimize
             parent strategies and improve individuals' capacity to
             incorporate avoided foods and cope with challenging eating
             situations are needed.},
   Doi = {10.1002/eat.23639},
   Key = {fds359919}
}
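
The abstract reports 82% accuracy for classifying perceived helpfulness of feeding strategies but does not name the exact model; a simple text-classification baseline in the same spirit is TF-IDF features with logistic regression. A self-contained toy sketch (the four example texts are invented placeholders):

    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import make_pipeline

    texts = ["praised every new food I tried", "forced me to clear my plate",
             "let me help cook dinner", "punished me for refusing to eat"]
    labels = [1, 0, 1, 0]  # 1 = perceived helpful

    clf = make_pipeline(TfidfVectorizer(ngram_range=(1, 2)),
                        LogisticRegression(max_iter=1000))
    clf.fit(texts, labels)
    print(clf.predict(["encouraged me gently at meals"]))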

@article{fds362455,
   Author = {Azami, H and Chang, Z and Arnold, SE and Sapiro, G and Gupta,
             AS},
   Title = {Detection of Oculomotor Dysmetria From Mobile Phone Video of
             the Horizontal Saccades Task Using Signal Processing and
             Machine Learning Approaches.},
   Journal = {IEEE access : practical innovations, open
             solutions},
   Volume = {10},
   Pages = {34022-34031},
   Year = {2022},
   Month = {January},
   url = {http://dx.doi.org/10.1109/access.2022.3156964},
   Abstract = {Eye movement assessments have the potential to help in
             diagnosis and tracking of neurological disorders. Cerebellar
             ataxias cause profound and characteristic abnormalities in
             smooth pursuit, saccades, and fixation. Oculomotor dysmetria
             (i.e., hypermetric and hypometric saccades) is a common
             finding in individuals with cerebellar ataxia. In this
             study, we evaluated a scalable approach for detecting and
             quantifying oculomotor dysmetria. Eye movement data were
             extracted from iPhone video recordings of the horizontal
             saccade task (a standard clinical task in ataxia) and
             combined with signal processing and machine learning
             approaches to quantify saccade abnormalities. Entropy-based
             measures of eye movements during saccades were significantly
             different in 72 individuals with ataxia with dysmetria
             compared with 80 ataxia and Parkinson's participants without
             dysmetria. A template matching-based analysis demonstrated
             that saccadic eye movements in patients without dysmetria
             were more similar to the ideal template of saccades. A
             support vector machine was then used to train and test the
             ability of multiple signal processing features in
             combination to distinguish individuals with and without
             oculomotor dysmetria. The model achieved 78% accuracy
              (sensitivity = 80%, specificity = 76%). These results show
             that the combination of signal processing and machine
             learning approaches applied to iPhone video of saccades,
             allow for extraction of information pertaining to oculomotor
             dysmetria in ataxia. Overall, this inexpensive and scalable
             approach for capturing important oculomotor information may
             be a useful component of a screening tool for ataxia and
             could allow frequent at-home assessments of oculomotor
             function in natural history studies and clinical
             trials.},
   Doi = {10.1109/access.2022.3156964},
   Key = {fds362455}
}
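
The final classification stage pairs the signal-processing features with a support vector machine. A generic sketch of that stage; the feature matrix X and labels y below are synthetic placeholders for the entropy and template-matching features described above.

    import numpy as np
    from sklearn.model_selection import cross_val_score
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn.svm import SVC

    rng = np.random.default_rng(0)
    X = rng.normal(size=(152, 6))      # placeholder per-participant features
    y = rng.integers(0, 2, size=152)   # placeholder dysmetria labels

    model = make_pipeline(StandardScaler(), SVC(kernel="rbf"))
    print(cross_val_score(model, X, y, cv=5).mean())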

@article{fds363416,
   Author = {Zhu, W and Qiu, Q and Calderbank, R and Sapiro, G and Cheng,
             X},
   Title = {Scaling-Translation-Equivariant Networks with Decomposed
             Convolutional Filters},
   Journal = {Journal of Machine Learning Research},
   Volume = {23},
   Year = {2022},
   Month = {January},
   Abstract = {Encoding the scale information explicitly into the
             representation learned by a convolutional neural network
             (CNN) is beneficial for many computer vision tasks
             especially when dealing with multiscale inputs. We study, in
              this paper, a scaling-translation-equivariant
              (ST-equivariant) CNN with joint convolutions across the space
              and the scaling group, which is shown to be both sufficient
              and necessary to achieve equivariance for the regular
              representation of the scaling-translation group ST. To
             reduce the model complexity and computational burden, we
             decompose the convolutional filters under two pre-fixed
             separable bases and truncate the expansion to low-frequency
             components. A further benefit of the truncated filter
             expansion is the improved deformation robustness of the
             equivariant representation, a property which is
             theoretically analyzed and empirically verified. Numerical
             experiments demonstrate that the proposed
             scaling-translation-equivariant network with decomposed
             convolutional filters (ScDCFNet) achieves significantly
             improved performance in multiscale image classification and
             better interpretability than regular CNNs at a reduced model
             size.},
   Key = {fds363416}
}
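
The core construction is a convolutional layer whose filters are truncated expansions over a fixed separable basis, so only the low-frequency expansion coefficients are learned. A PyTorch sketch of that idea; the random orthonormal basis here is a placeholder for the paper's separable bases.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class DecomposedConv2d(nn.Module):
        """weight = sum_k coef[k] * basis[k]; the coefficients are the only
        learned parameters, reducing model size versus free filters."""
        def __init__(self, in_ch, out_ch, basis):      # basis: (K, k, k)
            super().__init__()
            self.register_buffer("basis", basis)
            self.coef = nn.Parameter(0.1 * torch.randn(out_ch, in_ch, len(basis)))

        def forward(self, x):
            w = torch.einsum("oik,kxy->oixy", self.coef, self.basis)
            return F.conv2d(x, w, padding=self.basis.shape[-1] // 2)

    # toy 6-element orthonormal basis of 3x3 filters
    basis = torch.linalg.qr(torch.randn(9, 9))[0][:, :6].T.reshape(6, 3, 3)
    out = DecomposedConv2d(3, 16, basis)(torch.randn(1, 3, 32, 32))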

@article{fds373569,
   Author = {Skerrett, E and Miao, Z and Asiedu, MN and Richards, M and Crouch, B and Sapiro, G and Qiu, Q and Ramanujam, N},
   Title = {Multicontrast Pocket Colposcopy Cervical Cancer Diagnostic
             Algorithm for Referral Populations.},
   Journal = {BME frontiers},
   Volume = {2022},
   Pages = {9823184},
   Year = {2022},
   Month = {January},
   url = {http://dx.doi.org/10.34133/2022/9823184},
   Abstract = {<i>Objective and Impact Statement</i>. We use deep learning
             models to classify cervix images-collected with a low-cost,
             portable Pocket colposcope-with biopsy-confirmed high-grade
             precancer and cancer. We boost classification performance on
             a screened-positive population by using a class-balanced
             loss and incorporating green-light colposcopy image pairs,
             which come at no additional cost to the provider.
             <i>Introduction</i>. Because the majority of the 300,000
             annual deaths due to cervical cancer occur in countries with
             low- or middle-Human Development Indices, an automated
             classification algorithm could overcome limitations caused
             by the low prevalence of trained professionals and
             diagnostic variability in provider visual interpretations.
             <i>Methods</i>. Our dataset consists of cervical images
             (n=1,760) from 880 patient visits. After optimizing the
             network architecture and incorporating a weighted loss
             function, we explore two methods of incorporating green
             light image pairs into the network to boost the
             classification performance and sensitivity of our model on a
             test set. <i>Results</i>. We achieve an area under the
             receiver-operator characteristic curve, sensitivity, and
             specificity of 0.87, 75%, and 88%, respectively. The
             addition of the class-balanced loss and green light cervical
             contrast to a Resnet-18 backbone results in a 2.5 times
             improvement in sensitivity. <i>Conclusion</i>. Our
             methodology, which has already been tested on a prescreened
             population, can boost classification performance and, in the
             future, be coupled with Pap smear or HPV triaging, thereby
             broadening access to early detection of precursor lesions
             before they advance to cancer.},
   Doi = {10.34133/2022/9823184},
   Key = {fds373569}
}
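
The abstract credits part of the sensitivity gain to a class-balanced loss; one widely used variant weights each class by the inverse "effective number of samples" (Cui et al., 2019). A sketch under that assumption, with illustrative class counts:

    import torch
    import torch.nn as nn

    def class_balanced_weights(counts, beta=0.999):
        """Weight ~ (1 - beta) / (1 - beta**n_c), normalized to mean 1."""
        counts = torch.as_tensor(counts, dtype=torch.float)
        w = (1.0 - beta) / (1.0 - beta ** counts)
        return w * len(counts) / w.sum()

    weights = class_balanced_weights([620, 260])     # negatives, positives
    criterion = nn.CrossEntropyLoss(weight=weights)  # drop into training loop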

@article{fds367819,
   Author = {Simhal, AK and Carpenter, KLH and Kurtzberg, J and Song, A and Tannenbaum, A and Zhang, L and Sapiro, G and Dawson,
             G},
   Title = {Changes in the geometry and robustness of diffusion tensor
             imaging networks: Secondary analysis from a randomized
             controlled trial of young autistic children receiving an
             umbilical cord blood infusion.},
   Journal = {Front Psychiatry},
   Volume = {13},
   Pages = {1026279},
   Year = {2022},
   url = {http://dx.doi.org/10.3389/fpsyt.2022.1026279},
   Abstract = {Diffusion tensor imaging (DTI) has been used as an outcome
             measure in clinical trials for several psychiatric disorders
             but has rarely been explored in autism clinical trials. This
             is despite a large body of research suggesting altered white
             matter structure in autistic individuals. The current study
             is a secondary analysis of changes in white matter
             connectivity from a double-blind placebo-control trial of a
             single intravenous cord blood infusion in 2-7-year-old
             autistic children (1). Both clinical assessments and DTI
             were collected at baseline and 6 months after infusion. This
             study used two measures of white matter connectivity: change
             in node-to-node connectivity as measured through DTI
             streamlines and a novel measure of feedback network
             connectivity, Ollivier-Ricci curvature (ORC). ORC is a
             network measure which considers both local and global
             connectivity to assess the robustness of any given pathway.
             Using both the streamline and ORC analyses, we found
             reorganization of white matter pathways in predominantly
             frontal and temporal brain networks in autistic children who
             received umbilical cord blood treatment versus those who
             received a placebo. By looking at changes in network
             robustness, this study examined not only the direct,
             physical changes in connectivity, but changes with respect
             to the whole brain network. Together, these results suggest
             the use of DTI and ORC should be further explored as a
             potential biomarker in future autism clinical trials. These
             results, however, should not be interpreted as evidence for
             the efficacy of cord blood for improving clinical outcomes
             in autism. This paper presents a secondary analysis using
             data from a clinical trial that was prospectively registered
              with ClinicalTrials.gov (NCT02847182).},
   Doi = {10.3389/fpsyt.2022.1026279},
   Key = {fds367819}
}
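
Ollivier-Ricci curvature compares the transport cost between the neighborhood measures of an edge's endpoints to the endpoints' distance: kappa(x, y) = 1 - W1(m_x, m_y) / d(x, y). A generic graph sketch using networkx and the POT optimal-transport package; the study applies this to DTI streamline networks, so this is only the underlying computation.

    import networkx as nx
    import numpy as np
    import ot  # POT: Python Optimal Transport

    def ollivier_ricci(G, x, y, alpha=0.5):
        """kappa = 1 - W1(m_x, m_y) / d(x, y), with lazy random-walk
        measures m_v (mass alpha at v, the rest split over neighbors)."""
        nodes = list(G.nodes)
        idx = {v: i for i, v in enumerate(nodes)}
        D = np.asarray(nx.floyd_warshall_numpy(G))  # shortest-path distances

        def measure(v):
            m = np.zeros(len(nodes))
            m[idx[v]] = alpha
            nbrs = list(G.neighbors(v))
            m[[idx[u] for u in nbrs]] += (1 - alpha) / len(nbrs)
            return m

        w1 = ot.emd2(measure(x), measure(y), D)     # 1-Wasserstein cost
        return 1.0 - w1 / D[idx[x], idx[y]]

    print(ollivier_ricci(nx.karate_club_graph(), 0, 1))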

@article{fds355497,
   Author = {Perochon, S and Di Martino, M and Aiello, R and Baker, J and Carpenter,
             K and Chang, Z and Compton, S and Davis, N and Eichner, B and Espinosa, S and Flowers, J and Franz, L and Gagliano, M and Harris, A and Howard, J and Kollins, SH and Perrin, EM and Raj, P and Spanos, M and Walter, B and Sapiro, G and Dawson, G},
   Title = {A scalable computational approach to assessing response to
             name in toddlers with autism.},
   Journal = {J Child Psychol Psychiatry},
   Volume = {62},
   Number = {9},
   Pages = {1120-1131},
   Year = {2021},
   Month = {September},
   url = {http://dx.doi.org/10.1111/jcpp.13381},
   Abstract = {BACKGROUND: This study is part of a larger research program
             focused on developing objective, scalable tools for digital
             behavioral phenotyping. We evaluated whether a digital app
             delivered on a smartphone or tablet using computer vision
             analysis (CVA) can elicit and accurately measure one of the
             most common early autism symptoms, namely failure to respond
             to a name call. METHODS: During a pediatric primary care
             well-child visit, 910 toddlers, 17-37 months old, were
             administered an app on an iPhone or iPad consisting of brief
             movies during which the child's name was called three times
             by an examiner standing behind them. Thirty-seven toddlers
             were subsequently diagnosed with autism spectrum disorder
             (ASD). Name calls and children's behavior were recorded by
             the camera embedded in the device, and children's head turns
             were coded by both CVA and a human. RESULTS: CVA coding of
             response to name was found to be comparable to human coding.
             Based on CVA, children with ASD responded to their name
             significantly less frequently than children without ASD. CVA
             also revealed that children with ASD who did orient to their
             name exhibited a longer latency before turning their head.
             Combining information about both the frequency and the delay
             in response to name improved the ability to distinguish
             toddlers with and without ASD. CONCLUSIONS: A digital app
             delivered on an iPhone or iPad in real-world settings using
             computer vision analysis to quantify behavior can reliably
             detect a key early autism symptom-failure to respond to
             name. Moreover, the higher resolution offered by CVA
             identified a delay in head turn in toddlers with ASD who did
             respond to their name. Digital phenotyping is a promising
             methodology for early assessment of ASD symptoms.},
   Doi = {10.1111/jcpp.13381},
   Key = {fds355497}
}
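
%% Sketch: the scoring described above reduces to two numbers per name call,
%% whether the child turned and how long the turn took. A minimal version from
%% a per-frame head-yaw trace is below; the 25-degree threshold, 3 s window,
%% and 30 fps are illustrative assumptions, not the study's parameters.

import numpy as np

def response_to_name(yaw_deg, call_frame, fps=30.0, turn_thresh=25.0, window_s=3.0):
    window = yaw_deg[call_frame : call_frame + int(window_s * fps)]
    turned = np.abs(window - window[0]) > turn_thresh   # yaw change since the call
    if not turned.any():
        return False, None                              # no response detected
    return True, np.argmax(turned) / fps                # latency in seconds

yaw = np.concatenate([np.zeros(60), np.linspace(0, 60, 30), np.full(30, 60.0)])
print(response_to_name(yaw, call_frame=45))             # (True, latency)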

@article{fds356416,
   Author = {Chang, Z and Di Martino, JM and Aiello, R and Baker, J and Carpenter, K and Compton, S and Davis, N and Eichner, B and Espinosa, S and Flowers, J and Franz, L and Harris, A and Howard, J and Perochon, S and Perrin, EM and Krishnappa Babu, PR and Spanos, M and Sullivan, C and Walter, BK and Kollins, SH and Dawson, G and Sapiro, G},
   Title = {Computational Methods to Measure Patterns of Gaze in
             Toddlers With Autism Spectrum Disorder.},
   Journal = {JAMA Pediatr},
   Volume = {175},
   Number = {8},
   Pages = {827-836},
   Year = {2021},
   Month = {August},
   url = {http://dx.doi.org/10.1001/jamapediatrics.2021.0530},
   Abstract = {IMPORTANCE: Atypical eye gaze is an early-emerging symptom
             of autism spectrum disorder (ASD) and holds promise for
             autism screening. Current eye-tracking methods are expensive
             and require special equipment and calibration. There is a
             need for scalable, feasible methods for measuring eye gaze.
             OBJECTIVE: Using computational methods based on computer
             vision analysis, we evaluated whether an app deployed on an
             iPhone or iPad that displayed strategically designed brief
             movies could elicit and quantify differences in eye-gaze
             patterns of toddlers with ASD vs typical development.
             DESIGN, SETTING, AND PARTICIPANTS: A prospective study in
             pediatric primary care clinics was conducted from December
             2018 to March 2020, comparing toddlers with and without ASD.
             Caregivers of 1564 toddlers were invited to participate
             during a well-child visit. A total of 993 toddlers (63%)
             completed study measures. Enrollment criteria were aged 16
             to 38 months, healthy, English- or Spanish-speaking
             caregiver, and toddler able to sit and view the app.
             Participants were screened with the Modified Checklist for
             Autism in Toddlers-Revised With Follow-up during routine
             care. Children were referred by their pediatrician for
             diagnostic evaluation based on results of the checklist or
             if the caregiver or pediatrician was concerned. Forty
             toddlers subsequently were diagnosed with ASD. EXPOSURES: A
             mobile app displayed on a smartphone or tablet. MAIN
             OUTCOMES AND MEASURES: Computer vision analysis quantified
             eye-gaze patterns elicited by the app, which were compared
             between toddlers with ASD vs typical development. RESULTS:
             Mean age of the sample was 21.1 months (range, 17.1-36.9
             months), and 50.6% were boys, 59.8% White individuals, 16.5%
             Black individuals, 23.7% other race, and 16.9%
             Hispanic/Latino individuals. Distinctive eye-gaze patterns
             were detected in toddlers with ASD, characterized by reduced
             gaze to social stimuli and to salient social moments during
             the movies, and previously unknown deficits in coordination
             of gaze with speech sounds. The area under the receiver
             operating characteristic curve discriminating ASD vs non-ASD
             using multiple gaze features was 0.90 (95% CI, 0.82-0.97).
             CONCLUSIONS AND RELEVANCE: The app reliably measured both
             known and new gaze biomarkers that distinguished toddlers
             with ASD vs typical development. These novel results may
             have potential for developing scalable autism screening
             tools, exportable to natural settings, and enabling data
             sets amenable to machine learning.},
   Doi = {10.1001/jamapediatrics.2021.0530},
   Key = {fds356416}
}
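
%% Sketch: the reported 0.90 AUC comes from combining multiple gaze features
%% in a classifier. A generic cross-validated version of that evaluation is
%% below; the features, classifier, and synthetic data are assumptions,
%% illustrating only how such a discrimination score is computed.

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import cross_val_predict

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))   # e.g., gaze-to-social %, gaze-speech sync, fixation side
y = (X[:, 0] + 0.5 * rng.normal(size=200) < -0.5).astype(int)   # toy ASD labels

probs = cross_val_predict(LogisticRegression(), X, y, cv=5,
                          method="predict_proba")[:, 1]
print("AUC:", roc_auc_score(y, probs))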

@article{fds340786,
   Author = {Bovery, M and Dawson, G and Hashemi, J and Sapiro,
             G},
   Title = {A Scalable Off-the-Shelf Framework for Measuring Patterns of
             Attention in Young Children and its Application in Autism
             Spectrum Disorder.},
   Journal = {IEEE transactions on affective computing},
   Volume = {12},
   Number = {3},
   Pages = {722-731},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2021},
   Month = {July},
   url = {http://dx.doi.org/10.1109/taffc.2018.2890610},
   Abstract = {Autism spectrum disorder (ASD) is associated with deficits
             in the processing of social information and difficulties in
             social interaction, and individuals with ASD exhibit
             atypical attention and gaze. Traditionally, gaze studies
             have relied upon precise and constrained means of monitoring
             attention using expensive equipment in laboratories. In this
             work we develop a low-cost off-the-shelf alternative for
             measuring attention that can be used in natural settings.
             The head and iris positions of 104 children aged 16-31 months, an
             age range appropriate for ASD screening and diagnosis, 22 of
             them diagnosed with ASD, were recorded using the front
             facing camera in an iPad while they watched on the device
             screen a movie displaying dynamic stimuli, social stimuli on
             the left and nonsocial stimuli on the right. The head and
             iris position were then automatically analyzed via computer
             vision algorithms to detect the direction of attention.
             Children in the ASD group paid less attention to the movie,
             showed less attention to the social as compared to the
             nonsocial stimuli, and often fixated their attention to one
             side of the screen. The proposed method provides a low-cost
             means of monitoring attention to properly designed stimuli,
             demonstrating that the integration of stimuli design and
             automatic response analysis results in the opportunity to
             use off-the-shelf cameras to assess behavioral
             biomarkers.},
   Doi = {10.1109/taffc.2018.2890610},
   Key = {fds340786}
}
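
%% Sketch: the left/right attention measure above can be approximated by a
%% simple rule on tracked landmarks: compare the iris center with the eye
%% corners to decide gaze side per frame, then aggregate. The threshold and
%% the landmark inputs are illustrative assumptions.

import numpy as np

def attention_side(iris_x, eye_left_x, eye_right_x, margin=0.1):
    # Normalized iris position in the eye opening: 0 = left corner, 1 = right.
    pos = (iris_x - eye_left_x) / (eye_right_x - eye_left_x)
    if pos < 0.5 - margin:
        return "left"    # toward the social stimulus in the cited design
    if pos > 0.5 + margin:
        return "right"   # toward the nonsocial stimulus
    return "center"

frames = np.random.uniform(0.0, 1.0, size=100)          # toy normalized iris tracks
sides = [attention_side(p, 0.0, 1.0) for p in frames]
print({s: sides.count(s) for s in set(sides)})          # attention distribution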

@article{fds354530,
   Author = {Emani, PS and Warrell, J and Anticevic, A and Bekiranov, S and Gandal,
             M and McConnell, MJ and Sapiro, G and Aspuru-Guzik, A and Baker, JT and Bastiani, M and Murray, JD and Sotiropoulos, SN and Taylor, J and Senthil, G and Lehner, T and Gerstein, MB and Harrow,
             AW},
   Title = {Quantum computing at the frontiers of biological
             sciences.},
   Journal = {Nature methods},
   Volume = {18},
   Number = {7},
   Pages = {701-709},
   Year = {2021},
   Month = {July},
   url = {http://dx.doi.org/10.1038/s41592-020-01004-3},
   Doi = {10.1038/s41592-020-01004-3},
   Key = {fds354530}
}

@article{fds359524,
   Author = {Dong, H and Wang, Z and Qiu, Q and Sapiro, G},
   Title = {Using text to teach image retrieval},
   Journal = {IEEE Computer Society Conference on Computer Vision and
             Pattern Recognition Workshops},
   Pages = {1643-1652},
   Year = {2021},
   Month = {June},
   ISBN = {9781665448994},
   url = {http://dx.doi.org/10.1109/CVPRW53098.2021.00180},
   Abstract = {Image retrieval relies heavily on the quality of the data
             modeling and the distance measurement in the feature space.
             Building on the concept of image manifold, we first propose
             to represent the feature space of images, learned via neural
             networks, as a graph. Neighborhoods in the feature space are
             now defined by the geodesic distance between images,
             represented as graph vertices or manifold samples. When
             limited images are available, this manifold is sparsely
             sampled, making the geodesic computation and the
             corresponding retrieval harder. To address this, we augment
             the manifold samples with geometrically aligned text,
             thereby using a plethora of sentences to teach us about
             images. In addition to extensive results on standard
             datasets illustrating the power of text to help in image
             retrieval, a new public dataset based on CLEVR is introduced
             to quantify the semantic similarity between visual data and
             text data. The experimental results show that the joint
             embedding manifold is a robust representation, allowing it
             to be a better basis to perform image retrieval given only
             an image and a textual instruction on the desired
             modifications over the image.},
   Doi = {10.1109/CVPRW53098.2021.00180},
   Key = {fds359524}
}
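
%% Sketch: the retrieval scheme above ranks images by geodesic, not Euclidean,
%% distance on a graph built over (image and text) embeddings. A minimal
%% version with a k-nearest-neighbor graph and Dijkstra geodesics is below;
%% the embeddings and k are placeholders.

import numpy as np
from scipy.sparse.csgraph import dijkstra
from sklearn.neighbors import kneighbors_graph

emb = np.random.default_rng(0).normal(size=(100, 16))      # joint embedding samples
A = kneighbors_graph(emb, n_neighbors=5, mode="distance")  # sparse manifold graph
geo = dijkstra(A, directed=False, indices=0)               # geodesics from query item 0
print(np.argsort(geo)[1:11])                               # top-10 retrieved neighbors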

@article{fds355623,
   Author = {Solomon, O and Palnitkar, T and Patriat, R and Braun, H and Aman, J and Park, MC and Vitek, J and Sapiro, G and Harel, N},
   Title = {Deep-learning based fully automatic segmentation of the
             globus pallidus interna and externa using ultra-high 7 Tesla
             MRI.},
   Journal = {Human brain mapping},
   Volume = {42},
   Number = {9},
   Pages = {2862-2879},
   Year = {2021},
   Month = {June},
   url = {http://dx.doi.org/10.1002/hbm.25409},
   Abstract = {Deep brain stimulation (DBS) surgery has been shown to
             dramatically improve the quality of life for patients with
             various motor dysfunctions, such as those afflicted with
             Parkinson's disease (PD), dystonia, and essential tremor
             (ET), by relieving motor symptoms associated with such
             pathologies. The success of DBS procedures is directly
             related to the proper placement of the electrodes, which
             requires the ability to accurately detect and identify
             relevant target structures within the subcortical basal
             ganglia region. In particular, accurate and reliable
             segmentation of the globus pallidus (GP) interna is of great
             interest for DBS surgery for PD and dystonia. In this study,
             we present a deep-learning based neural network, which we
             term GP-net, for the automatic segmentation of both the
             external and internal segments of the globus pallidus. High
             resolution 7 Tesla images from 101 subjects were used in
             this study; GP-net is trained on a cohort of 58 subjects,
             containing patients with movement disorders as well as
             healthy control subjects. GP-net performs 3D inference in a
             patient-specific manner, alleviating the need for
             atlas-based segmentation. GP-net was extensively validated,
             both quantitatively and qualitatively over 43 test subjects
             including patients with movement disorders and healthy
             controls, and is shown to consistently produce improved
             segmentation results compared with state-of-the-art
             atlas-based segmentations. We also demonstrate a
             postoperative lead location assessment with respect to a
             segmented globus pallidus obtained by GP-net.},
   Doi = {10.1002/hbm.25409},
   Key = {fds355623}
}

@article{fds352400,
   Author = {Carpenter, KLH and Hashemi, J and Campbell, K and Lippmann, SJ and Baker,
             JP and Egger, HL and Espinosa, S and Vermeer, S and Sapiro, G and Dawson,
             G},
   Title = {Digital Behavioral Phenotyping Detects Atypical Pattern of
             Facial Expression in Toddlers with Autism.},
   Journal = {Autism Res},
   Volume = {14},
   Number = {3},
   Pages = {488-499},
   Year = {2021},
   Month = {March},
   url = {http://dx.doi.org/10.1002/aur.2391},
   Abstract = {Commonly used screening tools for autism spectrum disorder
             (ASD) generally rely on subjective caregiver questionnaires.
             While behavioral observation is more objective, it is also
             expensive, time-consuming, and requires significant
             expertise to perform. As such, there remains a critical need
             to develop feasible, scalable, and reliable tools that can
             characterize ASD risk behaviors. This study assessed the
             utility of a tablet-based behavioral assessment for
             eliciting and detecting one type of risk behavior, namely,
             patterns of facial expression, in 104 toddlers (ASD N
             = 22) and evaluated whether such patterns differentiated
             toddlers with and without ASD. The assessment consisted of
             the child sitting on his/her caregiver's lap and watching
             brief movies shown on a smart tablet while the embedded
             camera recorded the child's facial expressions. Computer
             vision analysis (CVA) automatically detected and tracked
             facial landmarks, which were used to estimate head position
             and facial expressions (Positive, Neutral, All Other). Using
             CVA, specific points throughout the movies were identified
             that reliably differentiate between children with and
             without ASD based on their patterns of facial movement and
             expressions (area under the curves for individual movies
             ranging from 0.62 to 0.73). During these instances, children
             with ASD more frequently displayed Neutral expressions
             compared to children without ASD, who had more All Other
             expressions. The frequency of All Other expressions was
             driven by non-ASD children more often displaying raised
             eyebrows and an open mouth, characteristic of
             engagement/interest. Preliminary results suggest
             computational coding of facial movements and expressions via
             a tablet-based assessment can detect differences in
             affective expression, one of the early, core features of
             ASD. LAY SUMMARY: This study tested the use of a tablet in
             the behavioral assessment of young children with autism.
             Children watched a series of developmentally appropriate
             movies and their facial expressions were recorded using the
             camera embedded in the tablet. Results suggest that
             computational assessments of facial expressions may be
             useful in early detection of symptoms of
             autism.},
   Doi = {10.1002/aur.2391},
   Key = {fds352400}
}

@article{fds359295,
   Author = {Achddou, R and Di Martino, JM and Sapiro, G},
   Title = {Nested learning for multi-level classification},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Volume = {2021-June},
   Pages = {2815-2819},
   Year = {2021},
   Month = {January},
   url = {http://dx.doi.org/10.1109/ICASSP39728.2021.9415076},
   Abstract = {Deep neural networks models are generally designed and
             trained for a specific type and quality of data. In this
             work, we address this problem in the context of nested
             learning. For many applications, both the input data, at
             training and testing, and the prediction can be conceived at
             multiple nested quality/resolutions. We show that by
             leveraging this multiscale information, the problem of poor
             generalization and prediction overconfidence, as well as the
             exploitation of multiple training data quality, can be
             efficiently addressed. We evaluate the proposed ideas in six
             public datasets: MNIST, Fashion-MNIST, CIFAR10, CIFAR100,
             Plantvillage, and DBPEDIA. We observe that coarsely
             annotated data can help to solve fine predictions and reduce
             overconfidence significantly. We also show that hierarchical
             learning produces models intrinsically more robust to
             adversarial attacks and data perturbations.},
   Doi = {10.1109/ICASSP39728.2021.9415076},
   Key = {fds359295}
}
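
%% Sketch: a minimal coarse-to-fine ("nested") classifier in the spirit of the
%% entry above: a shared backbone with a coarse head feeding a fine head, so
%% coarsely annotated samples can still train the coarse level. The sizes and
%% the equal loss weighting are illustrative assumptions.

import torch
import torch.nn as nn
import torch.nn.functional as F

class NestedClassifier(nn.Module):
    def __init__(self, dim=32, n_coarse=2, n_fine=10):
        super().__init__()
        self.backbone = nn.Sequential(nn.Linear(64, dim), nn.ReLU())
        self.coarse_head = nn.Linear(dim, n_coarse)
        # The fine prediction is nested in (conditioned on) the coarse one.
        self.fine_head = nn.Linear(dim + n_coarse, n_fine)

    def forward(self, x):
        h = self.backbone(x)
        c = self.coarse_head(h)
        return c, self.fine_head(torch.cat([h, c], dim=1))

model = NestedClassifier()
x = torch.randn(8, 64)
y_coarse, y_fine = torch.randint(0, 2, (8,)), torch.randint(0, 10, (8,))
c, f = model(x)
# For coarse-only annotated samples, simply drop the fine term below.
loss = F.cross_entropy(c, y_coarse) + F.cross_entropy(f, y_fine)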

@article{fds354237,
   Author = {Di Martino, JM and Qiu, Q and Sapiro, G},
   Title = {Rethinking Shape From Shading for Spoofing
             Detection.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {30},
   Pages = {1086-1099},
   Year = {2021},
   Month = {January},
   url = {http://dx.doi.org/10.1109/tip.2020.3042082},
   Abstract = {Spoofing attacks are critical threats to modern face
             recognition systems, and most common countermeasures exploit
             2D texture features as they are easy to extract and deploy.
             3D shape-based methods can substantially improve spoofing
             prevention, but extracting the 3D shape of the face often
             requires complex hardware such as a 3D scanner and expensive
             computation. Motivated by the classical shape-from-shading
             model, we propose to obtain 3D facial features that can be
             used to recognize the presence of an actual 3D face, without
             explicit shape reconstruction. Such shading-based 3D
             features are extracted highly efficiently from a pair of
             images captured under different illumination, e.g., two
             images captured with and without flash. Thus the proposed
             method provides a rich 3D geometrical representation at
             negligible computational cost and minimal to no additional
             hardware. A theoretical analysis is provided to support why
             such simple 3D features can effectively describe the
             presence of an actual 3D shape while avoiding complicated
             calibration steps or hardware setup. Experimental validation
             shows that the proposed method can produce state-of-the-art
             spoofing prevention and enhance existing texture-based
             solutions.},
   Doi = {10.1109/tip.2020.3042082},
   Key = {fds354237}
}
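
%% Sketch: one classical way to expose shading (hence 3D) cues from a pair of
%% images under different illumination, as the entry above proposes, is the
%% per-pixel ratio image, in which albedo approximately cancels. This proxy
%% and its gradients are an illustrative assumption, not the paper's features.

import numpy as np

def shading_features(img_flash, img_ambient, eps=1e-6):
    ratio = (img_flash + eps) / (img_ambient + eps)   # albedo cancels in the ratio
    ratio /= ratio.mean()                             # crude exposure normalization
    gy, gx = np.gradient(ratio)                       # shading gradients ~ shape cues
    return np.stack([ratio, gx, gy], axis=-1)         # input to a spoof classifier

flash, ambient = np.random.rand(64, 64), np.random.rand(64, 64)  # aligned captures
print(shading_features(flash, ambient).shape)                    # (64, 64, 3)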

@article{fds362359,
   Author = {Wang, Z and Ding, S and Li, Y and Fenn, J and Roychowdhury, S and Wallin,
             A and Martin, L and Ryvola, S and Sapiro, G and Qiu,
             Q},
   Title = {Cirrus: A Long-range Bi-pattern LiDAR Dataset},
   Journal = {Proceedings - IEEE International Conference on Robotics and
             Automation},
   Volume = {2021-May},
   Pages = {5744-5750},
   Year = {2021},
   Month = {January},
   ISBN = {9781728190778},
   url = {http://dx.doi.org/10.1109/ICRA48506.2021.9561267},
   Abstract = {In this paper, we introduce Cirrus, a new long-range
             bi-pattern LiDAR public dataset for autonomous driving tasks
             such as 3D object detection, critical to highway driving and
             timely decision making. Our platform is equipped with a
             high-resolution video camera and a pair of LiDAR sensors
             with a 250-meter effective range, which is significantly
             longer than existing public datasets. We record paired point
             clouds simultaneously using both Gaussian and uniform
             scanning patterns. Point density varies significantly across
             such a long range, and different scanning patterns further
             diversify object representation in LiDAR. In Cirrus, eight
             categories of objects are exhaustively annotated in the
             LiDAR point clouds for the entire effective range. To
             illustrate the kind of studies supported by this new
             dataset, we introduce LiDAR model adaptation across
             different ranges, scanning patterns, and sensor devices.
             Promising results show the great potential of this new
             dataset to the robotics and computer vision
             communities.},
   Doi = {10.1109/ICRA48506.2021.9561267},
   Key = {fds362359}
}

@article{fds371279,
   Author = {Martinez, N and Bertran, M and Papadaki, A and Rodrigues, M and Sapiro,
             G},
   Title = {Blind Pareto Fairness and Subgroup Robustness},
   Journal = {Proceedings of Machine Learning Research},
   Volume = {139},
   Pages = {7492-7501},
   Year = {2021},
   Month = {January},
   ISBN = {9781713845065},
   Abstract = {Much of the work in the field of group fairness addresses
             disparities between predefined groups based on protected
             features such as gender, age, and race, which need to be
             available at train, and often also at test, time. These
             approaches are static and retrospective, since algorithms
             designed to protect groups identified a priori cannot
             anticipate and protect the needs of different at-risk groups
             in the future. In this work we analyze the space of
             solutions for worst-case fairness beyond demographics, and
             propose Blind Pareto Fairness (BPF), a method that leverages
             no-regret dynamics to recover a fair minimax classifier that
             reduces worst-case risk of any potential subgroup of
             sufficient size, and guarantees that the remaining
             population receives the best possible level of service. BPF
             addresses fairness beyond demographics, that is, it does not
             rely on predefined notions of at-risk groups, neither at
             train nor at test time. Our experimental results show that
             the proposed framework improves worst-case risk in multiple
             standard datasets, while simultaneously providing better
             levels of service for the remaining population. The code is
             available at github.com/natalialmg/BlindParetoFairness.},
   Key = {fds371279}
}

@article{fds338014,
   Author = {Hashemi, J and Dawson, G and Carpenter, KLH and Campbell, K and Qiu, Q and Espinosa, S and Marsan, S and Baker, JP and Egger, HL and Sapiro,
             G},
   Title = {Computer Vision Analysis for Quantification of Autism Risk
             Behaviors.},
   Journal = {IEEE Trans Affect Comput},
   Volume = {12},
   Number = {1},
   Pages = {215-226},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2021},
   url = {http://dx.doi.org/10.1109/taffc.2018.2868196},
   Abstract = {Observational behavior analysis plays a key role for the
             discovery and evaluation of risk markers for many
             neurodevelopmental disorders. Research on autism spectrum
             disorder (ASD) suggests that behavioral risk markers can be
             observed at 12 months of age or earlier, with diagnosis
             possible at 18 months. To date, these studies and
             evaluations involving observational analysis tend to rely
             heavily on clinical practitioners and specialists who have
             undergone intensive training to be able to reliably
             administer carefully designed behavioural-eliciting tasks,
             code the resulting behaviors, and interpret such behaviors.
             These methods are therefore extremely expensive,
             time-intensive, and are not easily scalable for large
             population or longitudinal observational analysis. We
             developed a self-contained, closed-loop, mobile application
             with movie stimuli designed to engage the child's attention
             and elicit specific behavioral and social responses, which
             are recorded with a mobile device camera and then analyzed
             via computer vision algorithms. Here, in addition to
             presenting this paradigm, we validate the system to measure
             engagement, name-call responses, and emotional responses of
             toddlers with and without ASD who were presented with the
             application. Additionally, we show examples of how the
             proposed framework can further risk marker research with
             fine-grained quantification of behaviors. The results
             suggest that these objective and automatic methods can aid
             behavioral analysis and are well suited for objective,
             automatic analysis in future studies.},
   Doi = {10.1109/taffc.2018.2868196},
   Key = {fds338014}
}

@article{fds349902,
   Author = {Major, S and Campbell, K and Espinosa, S and Baker, JP and Carpenter,
             KL and Sapiro, G and Vermeer, S and Dawson, G},
   Title = {Impact of a digital Modified Checklist for Autism in
             Toddlers-Revised on likelihood and age of autism diagnosis
             and referral for developmental evaluation.},
   Journal = {Autism},
   Volume = {24},
   Number = {7},
   Pages = {1629-1638},
   Year = {2020},
   Month = {October},
   url = {http://dx.doi.org/10.1177/1362361320916656},
   Abstract = {This was a project in primary care for young children
             (1-2 years old). We tested a parent questionnaire on a
             tablet. This tablet questionnaire asked questions to see
             whether the child may have autism. We compared the paper and
             pencil version of the questionnaire to the tablet
             questionnaire. We read the medical charts for the children
             until they were 4 years old to see whether they ended up
             having autism. We found that doctors were more likely to
             recommend an autism evaluation when a parent used the tablet
             questionnaire. We think that the tablet's automatic scoring
             feature helped the doctors. We also think that the doctors
             benefited from the advice the tablet gave
             them.},
   Doi = {10.1177/1362361320916656},
   Key = {fds349902}
}

@article{fds353020,
   Author = {Chang, Z and Chen, Z and Stephen, CD and Schmahmann, JD and Wu, H-T and Sapiro, G and Gupta, AS},
   Title = {Accurate detection of cerebellar smooth pursuit eye movement
             abnormalities via mobile phone video and machine
             learning.},
   Journal = {Scientific reports},
   Volume = {10},
   Number = {1},
   Pages = {18641},
   Year = {2020},
   Month = {October},
   url = {http://dx.doi.org/10.1038/s41598-020-75661-x},
   Abstract = {Eye movements are disrupted in many neurodegenerative
             diseases and are frequent and early features in conditions
             affecting the cerebellum. Characterizing eye movements is
             important for diagnosis and may be useful for tracking
             disease progression and response to therapies. Assessments
             are limited as they require an in-person evaluation by a
             neurology subspecialist or specialized and expensive
             equipment. We tested the hypothesis that important eye
             movement abnormalities in cerebellar disorders (i.e.,
             ataxias) could be captured from iPhone video. Videos of the
             face were collected from individuals with ataxia
             (n = 102) and from a comparative population (Parkinson's
             disease or healthy participants, n = 61). Computer
             vision algorithms were used to track the position of the eye
             which was transformed into high temporal resolution spectral
             features. Machine learning models trained on eye movement
             features were able to identify abnormalities in smooth
             pursuit (a key eye behavior) and accurately distinguish
             individuals with abnormal pursuit from controls
             (sensitivity = 0.84, specificity = 0.77). A novel
             machine learning approach generated severity estimates that
             correlated well with the clinician scores. We demonstrate
             the feasibility of capturing eye movement information using
             an inexpensive and widely accessible technology. This may be
             a useful approach for disease screening and for measuring
             severity in clinical trials.},
   Doi = {10.1038/s41598-020-75661-x},
   Key = {fds353020}
}
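
%% Sketch: the pipeline above maps a video-derived eye-position trace to
%% spectral features for a classifier. A short-time Fourier transform version
%% is sketched below; the STFT and its parameters are assumptions, since the
%% paper's "high temporal resolution spectral features" are not specified here.

import numpy as np
from scipy.signal import stft

fps = 30.0
t = np.arange(0, 10, 1 / fps)
eye_x = np.sin(2 * np.pi * 0.4 * t) + 0.05 * np.random.randn(t.size)  # pursuit trace

f, tt, Z = stft(eye_x, fs=fps, nperseg=64)   # time-frequency decomposition
features = np.abs(Z).mean(axis=1)            # mean power per frequency band
print(features.shape)                        # feature vector for the ML model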

@article{fds348928,
   Author = {Tenenbaum, EJ and Carpenter, KLH and Sabatos-DeVito, M and Hashemi,
             J and Vermeer, S and Sapiro, G and Dawson, G},
   Title = {A Six-Minute Measure of Vocalizations in Toddlers with
             Autism Spectrum Disorder.},
   Journal = {Autism Res},
   Volume = {13},
   Number = {8},
   Pages = {1373-1382},
   Year = {2020},
   Month = {August},
   url = {http://dx.doi.org/10.1002/aur.2293},
   Abstract = {To improve early identification of autism spectrum disorder
             (ASD), we need objective, reliable, and accessible measures.
             To that end, a previous study demonstrated that a
             tablet-based application (app) that assessed several autism
             risk behaviors distinguished between toddlers with ASD and
             non-ASD toddlers. Using vocal data collected during this
             study, we investigated whether vocalizations uttered during
             administration of this app can distinguish among toddlers
             aged 16-31 months with typical development (TD), language
             or developmental delay (DLD), and ASD. Participants' visual
             and vocal responses were recorded using the camera and
             microphone in a tablet while toddlers watched movies
             designed to elicit behaviors associated with risk for ASD.
             Vocalizations were then coded offline. Results showed that
             (a) children with ASD and DLD were less likely to produce
             words during app administration than TD participants; (b)
             the ratio of syllabic vocalizations to all vocalizations was
             higher among TD than ASD or DLD participants; and (c) the
             rates of nonsyllabic vocalizations were higher in the ASD
             group than in either the TD or DLD groups. Those producing
             more nonsyllabic vocalizations were 24 times more likely to
             be diagnosed with ASD. These results lend support to
             previous findings that early vocalizations might be useful
             in identifying risk for ASD in toddlers and demonstrate the
             feasibility of using a scalable tablet-based app for
             assessing vocalizations in the context of a routine
             pediatric visit. LAY SUMMARY: Although parents often report
             symptoms of autism spectrum disorder (ASD) in infancy, we
             are not yet reliably diagnosing ASD until much later in
             development. A previous study tested a tablet-based
             application (app) that recorded behaviors we know are
             associated with ASD to help identify children at risk for
             the disorder. Here we measured how children vocalize while
             they watched the movies presented on the tablet. Children
             with ASD were less likely to produce words, less likely to
             produce speechlike sounds, and more likely to produce
             atypical sounds while watching these movies. These measures,
             combined with other behaviors measured by the app, might
             help identify which children should be evaluated for ASD.
             Autism Res 2020, 13: 1373-1382. © 2020 International
             Society for Autism Research, Wiley Periodicals,
             Inc.},
   Doi = {10.1002/aur.2293},
   Key = {fds348928}
}

@article{fds371280,
   Author = {Isaev, DY and Tchapyjnikov, D and Cotten, CM and Tanaka, D and Martinez,
             N and Bertran, M and Sapiro, G and Carlson, D},
   Title = {Attention-Based Network for Weak Labels in Neonatal Seizure
             Detection.},
   Journal = {Proc Mach Learn Res},
   Volume = {126},
   Pages = {479-507},
   Year = {2020},
   Month = {August},
   Abstract = {Seizures are a common emergency in the neonatal intensive
             care unit (NICU) among newborns receiving therapeutic
             hypothermia for hypoxic ischemic encephalopathy. The high
             incidence of seizures in this patient population
             necessitates continuous electroencephalographic (EEG)
             monitoring to detect and treat them. Due to EEG recordings
             being reviewed intermittently throughout the day, inevitable
             delays to seizure identification and treatment arise. In
             recent years, work on neonatal seizure detection using deep
             learning algorithms has started gaining momentum. These
             algorithms face numerous challenges: first, the training
             data for such algorithms comes from individual patients,
             each with varying levels of label imbalance since the
             seizure burden in NICU patients differs by several orders of
             magnitude. Second, seizures in neonates are usually
             localized in a subset of EEG channels, and performing
             annotations per channel is very time-consuming. Hence models
             that make use of labels only per time period, and not per
             channel, are preferable. In this work we assess how
             different deep learning models and data balancing methods
             influence learning in neonatal seizure detection in EEGs. We
             propose a model which provides a level of importance to each
             of the EEG channels - a proxy to whether a channel exhibits
             seizure activity or not, and we provide a quantitative
             assessment of how well this mechanism works. The model is
             portable to EEG devices with differing layouts without
             retraining, facilitating its potential deployment across
             different medical centers. We also provide a first
             assessment of how a deep learning model for neonatal seizure
             detection agrees with human rater decisions - an important
             milestone for deployment to clinical practice. We show that
             high AUC values in a deep learning model do not necessarily
             correspond to agreement with a human expert, and there is
             still a need to further refine such algorithms for optimal
             seizure discrimination.},
   Key = {fds371280}
}
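
%% Sketch: the model above assigns each EEG channel an importance weight while
%% training only on per-period (weak) labels. A minimal attention-pooling
%% version is below; because the encoder and attention are shared across
%% channels, it runs on layouts with any channel count. Sizes are illustrative.

import torch
import torch.nn as nn

class ChannelAttentionDetector(nn.Module):
    def __init__(self, n_samples=256, feat_dim=32):
        super().__init__()
        self.encoder = nn.Sequential(nn.Linear(n_samples, feat_dim), nn.ReLU())
        self.attn = nn.Linear(feat_dim, 1)        # scores one channel at a time
        self.classifier = nn.Linear(feat_dim, 1)  # seizure vs. no seizure per window

    def forward(self, x):                         # x: (batch, channels, samples)
        h = self.encoder(x)                       # per-channel features
        a = torch.softmax(self.attn(h), dim=1)    # channel importance: a proxy for
        pooled = (a * h).sum(dim=1)               # which channels show seizures
        return self.classifier(pooled), a.squeeze(-1)

eeg = torch.randn(4, 18, 256)                     # 18-channel EEG windows
logit, channel_weights = ChannelAttentionDetector()(eeg)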

@article{fds350555,
   Author = {Simhal, AK and Carpenter, KLH and Nadeem, S and Kurtzberg, J and Song,
             A and Tannenbaum, A and Sapiro, G and Dawson, G},
   Title = {Measuring robustness of brain networks in autism spectrum
             disorder with Ricci curvature.},
   Journal = {Sci Rep},
   Volume = {10},
   Number = {1},
   Pages = {10819},
   Year = {2020},
   Month = {July},
   url = {http://dx.doi.org/10.1038/s41598-020-67474-9},
   Abstract = {Ollivier-Ricci curvature is a method for measuring the
             robustness of connections in a network. In this work, we use
             curvature to measure changes in robustness of brain networks
             in children with autism spectrum disorder (ASD). In an
             open-label clinical trial, participants with ASD were
             administered a single infusion of autologous umbilical cord
             blood and, as part of their clinical outcome measures, were
             imaged with diffusion MRI before and after the infusion. By
             using Ricci curvature to measure changes in robustness, we
             quantified both local and global changes in the brain
             networks and their potential relationship with the infusion.
             Our results find changes in the curvature of the connections
             between regions associated with ASD that were not detected
             via traditional brain network analysis.},
   Doi = {10.1038/s41598-020-67474-9},
   Key = {fds350555}
}

@article{fds350115,
   Author = {Di Martino, JM and Suzacq, F and Delbracio, M and Qiu, Q and Sapiro,
             G},
   Title = {Differential 3D Facial Recognition: Adding 3D to Your
             State-of-the-Art 2D Method.},
   Journal = {IEEE transactions on pattern analysis and machine
             intelligence},
   Volume = {42},
   Number = {7},
   Pages = {1582-1593},
   Year = {2020},
   Month = {July},
   url = {http://dx.doi.org/10.1109/tpami.2020.2986951},
   Abstract = {Active illumination is a prominent complement to enhance 2D
             face recognition and make it more robust, e.g., to spoofing
             attacks and low-light conditions. In the present work we
             show that it is possible to adopt active illumination to
             enhance state-of-the-art 2D face recognition approaches with
             3D features, while bypassing the complicated task of 3D
             reconstruction. The key idea is to project over the test
             face a high spatial frequency pattern, which allows us to
             simultaneously recover real 3D information plus a standard
             2D facial image. Therefore, state-of-the-art 2D face
             recognition solution can be transparently applied, while
             from the high frequency component of the input image,
             complementary 3D facial features are extracted. Experimental
             results on ND-2006 dataset show that the proposed ideas can
             significantly boost face recognition performance and
             dramatically improve the robustness to spoofing
             attacks.},
   Doi = {10.1109/tpami.2020.2986951},
   Key = {fds350115}
}
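
%% Sketch: the key step above is separable in frequency: the projected
%% high-frequency pattern is isolated with a high-pass filter, leaving a
%% standard 2D face image for any off-the-shelf recognizer. A Gaussian
%% low-pass split is an illustrative assumption for that decomposition.

import numpy as np
from scipy.ndimage import gaussian_filter

img = np.random.rand(128, 128)            # capture with a projected HF pattern
low = gaussian_filter(img, sigma=3.0)     # ~standard 2D face image (to 2D FR)
high = img - low                          # pattern deformation: 3D feature source
print(low.shape, high.shape)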

@article{fds352450,
   Author = {Asiedu, MN and Skerrett, E and Sapiro, G and Ramanujam,
             N},
   Title = {Combining multiple contrasts for improving machine
             learning-based classification of cervical cancers with a
             low-cost point-of-care Pocket colposcope.},
   Journal = {Annual International Conference of the IEEE Engineering in
             Medicine and Biology Society. IEEE Engineering in Medicine
             and Biology Society. Annual International
             Conference},
   Volume = {2020},
   Pages = {1148-1151},
   Year = {2020},
   Month = {July},
   ISBN = {9781728119908},
   url = {http://dx.doi.org/10.1109/embc44109.2020.9175858},
   Abstract = {We apply feature-extraction and machine learning methods to
             multiple sources of contrast (acetic acid, Lugol's iodine
             and green light) from the white Pocket Colposcope, a
             low-cost point of care colposcope for cervical cancer
             screening. We combine features from the sources of contrast
             and analyze diagnostic improvements with addition of each
             contrast. We find that overall AUC increases with additional
             contrast agents compared to using only one
             source.},
   Doi = {10.1109/embc44109.2020.9175858},
   Key = {fds352450}
}
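
%% Sketch: the comparison above, AUC as contrast sources are added, reduces to
%% evaluating nested feature sets. A compact cross-validated loop is below;
%% the feature blocks and data are synthetic placeholders.

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(1)
y = rng.integers(0, 2, 300)
feats = {                                  # per-contrast feature blocks (toy)
    "acetic acid": rng.normal(y[:, None], 1.0, (300, 4)),
    "Lugol's iodine": rng.normal(y[:, None], 1.5, (300, 4)),
    "green light": rng.normal(y[:, None], 2.0, (300, 4)),
}
X = np.empty((300, 0))
for name, block in feats.items():          # add one contrast at a time
    X = np.hstack([X, block])
    auc = cross_val_score(RandomForestClassifier(), X, y, cv=5,
                          scoring="roc_auc").mean()
    print(f"+ {name}: AUC = {auc:.2f}")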

@article{fds355927,
   Author = {Isaev, DY and Major, S and Murias, M and Carpenter, KLH and Carlson, D and Sapiro, G and Dawson, G},
   Title = {Relative Average Look Duration and its Association with
             Neurophysiological Activity in Young Children with Autism
             Spectrum Disorder.},
   Journal = {Sci Rep},
   Volume = {10},
   Number = {1},
   Pages = {1912},
   Year = {2020},
   Month = {February},
   url = {http://dx.doi.org/10.1038/s41598-020-57902-1},
   Abstract = {Autism Spectrum Disorder (ASD) is characterized by early
             attentional differences that often precede the hallmark
             symptoms of social communication impairments. Development of
             novel measures of attentional behaviors may lead to earlier
             identification of children at risk for ASD. In this work, we
             first introduce a behavioral measure, Relative Average Look
             Duration (RALD), indicating attentional preference to
             different stimuli, such as social versus nonsocial stimuli;
             and then study its association with neurophysiological
             activity. We show that (1) ASD and typically developing (TD)
             children differ in both (absolute) Average Look Duration
             (ALD) and RALD to stimuli during an EEG experiment, with the
             most pronounced differences in looking at social stimuli;
             and (2) associations between looking behaviors and
             neurophysiological activity, as measured by EEG, are
             different for children with ASD versus TD. Even when ASD
             children show attentional engagement to social content, our
             results suggest that their underlying brain activity is
             different from that of TD children. This study therefore introduces
             a new measure of social/nonsocial attentional preference in
             ASD and demonstrates the value of incorporating attentional
             variables measured simultaneously with EEG into the analysis
             pipeline.},
   Doi = {10.1038/s41598-020-57902-1},
   Key = {fds355927}
}
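
%% Sketch: the measures above are simple to state. ALD is the mean look
%% duration per stimulus category; the normalization behind RALD is assumed
%% here to be the child's mean look duration over all categories (the paper's
%% exact definition may differ).

import numpy as np

looks = {                                  # look durations (s) by stimulus category
    "social": np.array([2.1, 3.4, 1.8]),
    "nonsocial": np.array([4.0, 5.2, 3.9]),
}
ald = {k: v.mean() for k, v in looks.items()}           # absolute ALD
overall = np.concatenate(list(looks.values())).mean()
rald = {k: a / overall for k, a in ald.items()}         # relative ALD
print(ald, rald)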

@article{fds348352,
   Author = {Dawson, G and Campbell, K and Hashemi, J and Lippmann, SJ and Smith, V and Carpenter, K and Egger, H and Espinosa, S and Vermeer, S and Baker, J and Sapiro, G},
   Title = {Author Correction: Atypical postural control can be detected
             via computer vision analysis in toddlers with autism
             spectrum disorder.},
   Journal = {Sci Rep},
   Volume = {10},
   Number = {1},
   Pages = {616},
   Year = {2020},
   Month = {January},
   url = {http://dx.doi.org/10.1038/s41598-020-57570-1},
   Abstract = {An amendment to this paper has been published and can be
             accessed via a link at the top of the paper.},
   Doi = {10.1038/s41598-020-57570-1},
   Key = {fds348352}
}

@article{fds349199,
   Author = {Giryes, R and Sapiro, G and Bronstein, AM},
   Title = {Erratum: Deep neural networks with random Gaussian weights:
             A universal classification strategy? (IEEE Transactions on
             Signal Processing (2016) 64:13 (3444-3457) DOI:
             10.1109/TSP.2016.2546221)},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {68},
   Pages = {529-531},
   Year = {2020},
   Month = {January},
   url = {http://dx.doi.org/10.1109/TSP.2019.2961228},
   Abstract = {Following a comment correspondence paper, we agree that
             there is a mistake in one of the formulas in the paper "Deep
             Neural Networks with Random Gaussian Weights: A Universal
             Classification Strategy?" We show that this error only
             impacts one claim in the original paper.},
   Doi = {10.1109/TSP.2019.2961228},
   Key = {fds349199}
}

@article{fds353070,
   Author = {Cohen, G and Sapiro, G and Giryes, R},
   Title = {Detecting Adversarial Samples Using Influence Functions and
             Nearest Neighbors},
   Journal = {Proceedings of the IEEE Computer Society Conference on
             Computer Vision and Pattern Recognition},
   Pages = {14441-14450},
   Year = {2020},
   Month = {January},
   url = {http://dx.doi.org/10.1109/CVPR42600.2020.01446},
   Abstract = {Deep neural networks (DNNs) are notorious for their
             vulnerability to adversarial attacks, which are small
             perturbations added to their input images to mislead their
             prediction. Detection of adversarial examples is, therefore,
             a fundamental requirement for robust classification
             frameworks. In this work, we present a method for detecting
             such adversarial attacks, which is suitable for any
             pre-trained neural network classifier. We use influence
             functions to measure the impact of every training sample on
             the validation set data. From the influence scores, we find
             the most supportive training samples for any given
             validation example. A k-nearest neighbor (k-NN) model fitted
             on the DNN's activation layers is employed to search for the
             ranking of these supporting training samples. We observe
             that these samples are highly correlated with the nearest
             neighbors of the normal inputs, while this correlation is
             much weaker for adversarial inputs. We train an adversarial
             detector using the k-NN ranks and distances and show that it
             successfully distinguishes adversarial examples, getting
             state-of-the-art results on six attack methods with three
             datasets. Code is available at https://github.com/giladcohen/NNIF_adv_defense.},
   Doi = {10.1109/CVPR42600.2020.01446},
   Key = {fds353070}
}
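
%% Sketch: the detector's second stage, given the indices of the most
%% supportive training samples for an input (the paper finds these with
%% influence functions, omitted here), checks where those samples rank among
%% the input's nearest neighbors in activation space; normal inputs yield low
%% ranks. Data sizes and indices are placeholders.

import numpy as np
from sklearn.neighbors import NearestNeighbors

rng = np.random.default_rng(0)
train_acts = rng.normal(size=(1000, 64))          # training-set DNN activations
knn = NearestNeighbors(n_neighbors=1000).fit(train_acts)

def rank_features(input_act, supportive_idx):
    dist, order = knn.kneighbors(input_act[None, :])
    ranks = np.array([np.where(order[0] == i)[0][0] for i in supportive_idx])
    return ranks.mean(), dist[0, ranks].mean()    # rank/distance detector features

print(rank_features(rng.normal(size=64), supportive_idx=[3, 17, 42]))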

@article{fds356485,
   Author = {Martinez, N and Bertran, M and Sapiro, G},
   Title = {Minimax pareto fairness: A multi objective
             perspective},
   Journal = {37th International Conference on Machine Learning, ICML
             2020},
   Volume = {PartF168147-9},
   Pages = {6711-6720},
   Year = {2020},
   Month = {January},
   ISBN = {9781713821120},
   Abstract = {In this work we formulate and formally characterize group
             fairness as a multi-objective optimization problem, where
             each sensitive group risk is a separate objective. We
             propose a fairness criterion where a classifier achieves
             minimax risk and is Pareto-efficient w.r.t. all groups,
             avoiding unnecessary harm, and can lead to the best zero-gap
             model if policy dictates so. We provide a simple
             optimization algorithm compatible with deep neural networks
             to satisfy these constraints. Since our method does not
             require test-time access to sensitive attributes, it can be
             applied to reduce worst-case classification errors between
             outcomes in unbalanced classification problems. We test the
             proposed methodology on real case-studies of predicting
             income, ICU patient mortality, skin lesions classification,
             and assessing credit risk, demonstrating how our framework
             compares favorably to other approaches.},
   Key = {fds356485}
}
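
%% Sketch: one simple realization of the optimization described above is to
%% alternate gradient steps on a weighted sum of group risks with a
%% multiplicative-weights update that shifts mass toward the worst-off group,
%% approaching a minimax-risk classifier. The model, step sizes, and data are
%% illustrative assumptions, not the paper's exact algorithm.

import torch
import torch.nn.functional as F

n, d, n_groups = 300, 5, 2
X = torch.randn(n, d)
y = (X[:, 0] > 0).float()
g = torch.randint(0, n_groups, (n,))             # sensitive group, train-time only

model = torch.nn.Linear(d, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
w = torch.ones(n_groups) / n_groups              # weights over group risks

for _ in range(200):
    losses = F.binary_cross_entropy_with_logits(model(X).squeeze(1), y,
                                                reduction="none")
    group_risk = torch.stack([losses[g == k].mean() for k in range(n_groups)])
    opt.zero_grad()
    (w * group_risk).sum().backward()            # model minimizes the weighted risk
    opt.step()
    w = w * torch.exp(0.5 * group_risk.detach()) # weight chases the worst group
    w = w / w.sum()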

@article{fds357435,
   Author = {Wang, Z and Cheng, X and Sapiro, G and Qiu, Q},
   Title = {A dictionary approach to domain-invariant learning in deep
             networks},
   Journal = {Advances in Neural Information Processing
             Systems},
   Volume = {2020-December},
   Year = {2020},
   Month = {January},
   Abstract = {In this paper, we consider domain-invariant deep learning by
             explicitly modeling domain shifts with only a small amount
             of domain-specific parameters in a Convolutional Neural
             Network (CNN). By exploiting the observation that a
             convolutional filter can be well approximated as a linear
             combination of a small set of dictionary atoms, we show for
             the first time, both empirically and theoretically, that
             domain shifts can be effectively handled by decomposing a
             convolutional layer into a domain-specific atom layer and a
             domain-shared coefficient layer, while both remain
             convolutional. An input channel will now first convolve
             spatially only with each respective domain-specific
             dictionary atom to "absorb" domain variations, and then
             output channels are linearly combined using common
             decomposition coefficients trained to promote shared
             semantics across domains. We use toy examples, rigorous
             analysis, and real-world examples with diverse datasets and
             architectures, to show the proposed plug-in framework’s
             effectiveness in cross and joint domain performance and
             domain adaptation. With the proposed architecture, we need
             only a small set of dictionary atoms to model each
             additional domain, which brings a negligible amount of
             additional parameters, typically a few hundred.},
   Key = {fds357435}
}
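
%% Sketch: the decomposition above can be written as two stacked convolutions:
%% a grouped "atom" convolution (domain-specific, each input channel filtered
%% by a few small atoms) followed by a shared 1x1 convolution of combination
%% coefficients. Atom count and layer sizes are illustrative assumptions.

import torch
import torch.nn as nn

class DictionaryConv(nn.Module):
    def __init__(self, c_in, c_out, n_atoms=6, k=3):
        super().__init__()
        # Domain-specific atoms: few parameters, swapped or tuned per domain.
        self.atoms = nn.Conv2d(c_in, c_in * n_atoms, k, padding=k // 2, groups=c_in)
        # Domain-shared coefficients: linearly combine the atom responses.
        self.coeffs = nn.Conv2d(c_in * n_atoms, c_out, kernel_size=1)

    def forward(self, x):
        return self.coeffs(self.atoms(x))

layer = DictionaryConv(c_in=16, c_out=32)
print(layer(torch.randn(2, 16, 8, 8)).shape)    # torch.Size([2, 32, 8, 8])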

@article{fds357585,
   Author = {Bertran, M and Martinez, N and Phielipp, M and Sapiro,
             G},
   Title = {Instance-based generalization in reinforcement
             learning},
   Journal = {Advances in Neural Information Processing
             Systems},
   Volume = {2020-December},
   Year = {2020},
   Month = {January},
   Abstract = {Agents trained via deep reinforcement learning (RL)
             routinely fail to generalize to unseen environments, even
             when these share the same underlying dynamics as the
             training levels. Understanding the generalization properties
             of RL is one of the challenges of modern machine learning.
             Towards this goal, we analyze policy learning in the context
             of Partially Observable Markov Decision Processes (POMDPs)
             and formalize the dynamics of training levels as instances.
             We prove that, independently of the exploration strategy,
             reusing instances introduces significant changes in the
             effective Markov dynamics the agent observes during
             training. Maximizing expected rewards impacts the learned
             belief state of the agent by inducing undesired
             instance-specific speed-running policies instead of
             generalizable ones, which are sub-optimal on the training
             set. We provide generalization bounds to the value gap in
             train and test environments based on the number of training
             instances, and use insights based on these to improve
             performance on unseen levels. We propose training a shared
             belief representation over an ensemble of specialized
             policies, from which we compute a consensus policy that is
             used for data collection, disallowing instance-specific
             exploitation. We experimentally validate our theory,
             observations, and the proposed computational solution over
             the CoinRun benchmark.},
   Key = {fds357585}
}
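
%% Sketch: the data-collection rule proposed above, a consensus over an
%% ensemble of specialized policies, amounts to averaging their action
%% distributions. The arithmetic-mean averaging rule is an assumption for
%% illustration.

import numpy as np

def consensus_policy(policy_probs):
    # policy_probs: (n_policies, n_actions), each row a softmax distribution.
    mean = policy_probs.mean(axis=0)       # consensus suppresses instance-specific
    return mean / mean.sum()               # exploitation by any single policy

ensemble = np.array([[0.7, 0.2, 0.1],      # policy 1 (e.g., speed-running)
                     [0.2, 0.6, 0.2],      # policy 2
                     [0.3, 0.3, 0.4]])     # policy 3
action = np.random.choice(3, p=consensus_policy(ensemble))
print(action)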

@article{fds370058,
   Author = {Wang, Z and Cheng, X and Sapiro, G and Qiu, Q},
   Title = {STOCHASTIC CONDITIONAL GENERATIVE NETWORKS WITH BASIS
             DECOMPOSITION},
   Journal = {8th International Conference on Learning Representations,
             ICLR 2020},
   Year = {2020},
   Month = {January},
   Abstract = {While generative adversarial networks (GANs) have
             revolutionized machine learning, a number of open questions
             remain to fully understand them and exploit their power. One
             of these questions is how to efficiently achieve proper
             diversity and sampling of the multi-mode data space. To
             address this, we introduce BasisGAN, a stochastic
             conditional multi-mode image generator. By exploiting the
             observation that a convolutional filter can be well
             approximated as a linear combination of a small set of basis
             elements, we learn a plug-and-played basis generator to
             stochastically generate basis elements, with just a few
             hundred parameters, to fully embed stochasticity into
             convolutional filters. By sampling basis elements instead of
             filters, we dramatically reduce the cost of modeling the
             parameter space with no sacrifice on either image diversity
             or fidelity. To illustrate this proposed plug-and-play
             framework, we construct variants of BasisGAN based on
             state-of-the-art conditional image generation networks, and
             train the networks by simply plugging in a basis generator,
             without additional auxiliary components, hyperparameters, or
             training objectives. The experimental success is
             complemented with theoretical results indicating how the
             perturbations introduced by the proposed sampling of basis
             elements can propagate to the appearance of generated
             images.},
   Key = {fds370058}
}

@article{fds349134,
   Author = {Wang, Z and Ding, S and Li, Y and Zhao, M and Roychowdhury, S and Wallin,
             A and Sapiro, G and Qiu, Q},
   Title = {Range adaptation for 3d object detection in
             LiDAR},
   Journal = {Proceedings - 2019 International Conference on Computer
             Vision Workshop, ICCVW 2019},
   Pages = {2320-2328},
   Year = {2019},
   Month = {October},
   url = {http://dx.doi.org/10.1109/ICCVW.2019.00285},
   Abstract = {LiDAR-based 3D object detection plays a crucial role in
             modern autonomous driving systems. LiDAR data often exhibit
             severe changes in properties across different observation
             ranges. In this paper, we explore cross-range adaptation for
             3D object detection using LiDAR, i.e., far-range
             observations are adapted to near-range. This way, far-range
             detection is optimized for similar performance to near-range
             one. We adopt a bird's-eye view (BEV) detection framework to
             perform the proposed model adaptation. Our model adaptation
             consists of an adversarial global adaptation, and a
             fine-grained local adaptation. The proposed cross-range
             adaptation framework is validated on three state-of-the-art
             LiDAR based object detection networks, and we consistently
             observe performance improvement on the far-range objects,
             without adding any auxiliary parameters to the model. To the
             best of our knowledge, this paper is the first attempt to
             study cross-range LiDAR adaptation for object detection in
             point clouds. To demonstrate the generality of the proposed
             adaptation framework, experiments on more challenging
             cross-device adaptation are further conducted, and a new
             LiDAR dataset with high-quality annotated point clouds is
             released to promote future research.},
   Doi = {10.1109/ICCVW.2019.00285},
   Key = {fds349134}
}

@article{fds349200,
   Author = {Chang, Z and Di Martino and JM and Qiu, Q and Espinosa, S and Sapiro,
             G},
   Title = {Salgaze: Personalizing gaze estimation using visual
             saliency},
   Journal = {Proceedings - 2019 International Conference on Computer
             Vision Workshop, ICCVW 2019},
   Pages = {1169-1178},
   Year = {2019},
   Month = {October},
   url = {http://dx.doi.org/10.1109/ICCVW.2019.00148},
   Abstract = {Traditional gaze estimation methods typically require
             explicit user calibration to achieve high accuracy. This
             process is cumbersome and recalibration is often required
             when there are changes in factors such as illumination and
             pose. To address this challenge, we introduce SalGaze, a
             framework that utilizes saliency information in the visual
             content to transparently adapt the gaze estimation algorithm
             to the user without explicit user calibration. We design an
             algorithm to transform a saliency map into a differentiable
             loss map that can be used for the optimization of CNN-based
             models. SalGaze is also able to greatly augment standard
             point calibration data with implicit video saliency
             calibration data using a unified framework. We show accuracy
             improvements of over 24% when applying our technique to existing
             methods.},
   Doi = {10.1109/ICCVW.2019.00148},
   Key = {fds349200}
}
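
A small PyTorch sketch of turning a saliency map into a differentiable loss for a gaze estimator, in the spirit of the abstract above; the normalization and the bilinear read-out are assumptions for illustration, not the authors' implementation.

import torch
import torch.nn.functional as F

def saliency_loss(gaze_xy, saliency, eps=1e-6):
    """gaze_xy: (B, 2) predicted gaze in [-1, 1] screen coordinates.
    saliency: (B, 1, H, W) nonnegative saliency maps.
    Returns the negative log-saliency at the predicted gaze point, so
    gradients pull predictions toward salient regions of the content."""
    # Normalize each map to a probability distribution over pixels.
    p = saliency / (saliency.sum(dim=(2, 3), keepdim=True) + eps)
    # A bilinear read-out keeps the loss differentiable in gaze_xy.
    grid = gaze_xy.view(-1, 1, 1, 2)                      # (B, 1, 1, 2)
    val = F.grid_sample(p, grid, align_corners=False)     # (B, 1, 1, 1)
    return -val.clamp_min(eps).log().mean()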

@article{fds346624,
   Author = {Simhal, AK and Zuo, Y and Perez, MM and Madison, DV and Sapiro, G and Micheva, KD},
   Title = {Multifaceted Changes in Synaptic Composition and Astrocytic
             Involvement in a Mouse Model of Fragile X
             Syndrome.},
   Journal = {Scientific reports},
   Volume = {9},
   Number = {1},
   Pages = {13855},
   Year = {2019},
   Month = {September},
   url = {http://dx.doi.org/10.1038/s41598-019-50240-x},
   Abstract = {Fragile X Syndrome (FXS), a common inheritable form of
             intellectual disability, is known to alter neocortical
             circuits. However, its impact on the diverse synapse types
             comprising these circuits, or on the involvement of
             astrocytes, is not well known. We used immunofluorescent
             array tomography to quantify different synaptic populations
             and their association with astrocytes in layers 1 through 4
             of the adult somatosensory cortex of a FXS mouse model, the
             FMR1 knockout mouse. The collected multi-channel data
             contained approximately 1.6 million synapses which were
             analyzed using a probabilistic synapse detector. Our study
             reveals complex, synapse-type- and layer-specific changes in
             the neocortical circuitry of FMR1 knockout mice. We report
             an increase of small glutamatergic VGluT1 synapses in layer
             4 accompanied by a decrease in large VGluT1 synapses in
             layers 1 and 4. VGluT2 synapses show a rather consistent
             decrease in density in layers 1 and 2/3. In all layers, we
             observe the loss of large inhibitory synapses. Lastly,
             astrocytic association of excitatory synapses decreases. The
             ability to dissect the circuit deficits by synapse type and
             astrocytic involvement will be crucial for understanding how
             these changes affect circuit function, and ultimately
             defining targets for therapeutic intervention.},
   Doi = {10.1038/s41598-019-50240-x},
   Key = {fds346624}
}

@article{fds348083,
   Author = {Martinez, N and Bertran, M and Sapiro, G and Wu, HT},
   Title = {Non-Contact Photoplethysmogram and Instantaneous Heart Rate
             Estimation from Infrared Face Video},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Volume = {2019-September},
   Pages = {2020-2024},
   Year = {2019},
   Month = {September},
   ISBN = {9781538662496},
   url = {http://dx.doi.org/10.1109/ICIP.2019.8803109},
   Abstract = {Extracting the instantaneous heart rate (iHR) from face
             videos has been well studied in recent years. It is well
             known that changes in skin color due to blood flow can be
             captured using conventional cameras. One of the main
             limitations of methods that rely on this principle is the
             need for an illumination source. Moreover, they have to be
             able to operate under different light conditions. One way to
             avoid these constraints is using infrared cameras, allowing
             the monitoring of iHR under low light conditions. In this
             work, we present a simple, principled signal extraction
             method that recovers the iHR from infrared face videos. We
             tested the procedure on 7 participants, for whom we recorded
             an electrocardiogram simultaneously with their infrared face
             video. We checked that the recovered signal matched the
             ground truth iHR, showing that infrared is a promising
             alternative to conventional video imaging for heart rate
             monitoring, especially in low light conditions. Code is
             available at https://github.com/natalialmg/IR-iHR.},
   Doi = {10.1109/ICIP.2019.8803109},
   Key = {fds348083}
}
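
A compact sketch of the kind of signal-extraction pipeline the abstract describes, with assumed processing choices (band limits, filter order): average the facial pixel intensity per frame, band-pass to the plausible pulse band, and read the instantaneous frequency from the analytic phase.

import numpy as np
from scipy.signal import butter, filtfilt, hilbert

def instantaneous_hr(frames, fps, low=0.7, high=3.0):
    """frames: (T, H, W) infrared face crops; fps: frame rate.
    Returns an iHR estimate in beats per minute, one value per frame."""
    sig = frames.reshape(len(frames), -1).mean(axis=1)    # (T,)
    sig = sig - sig.mean()
    b, a = butter(3, [low, high], btype='band', fs=fps)   # pulse band
    pulse = filtfilt(b, a, sig)
    phase = np.unwrap(np.angle(hilbert(pulse)))           # analytic phase
    freq_hz = np.gradient(phase) * fps / (2 * np.pi)      # d(phase)/dt
    return np.clip(freq_hz, low, high) * 60.0             # to bpm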

@article{fds340484,
   Author = {Asiedu, MN and Simhal, A and Chaudhary, U and Mueller, JL and Lam, CT and Schmitt, JW and Venegas, G and Sapiro, G and Ramanujam,
             N},
   Title = {Development of Algorithms for Automated Detection of
             Cervical Pre-Cancers With a Low-Cost, Point-of-Care, Pocket
             Colposcope.},
   Journal = {IEEE Trans Biomed Eng},
   Volume = {66},
   Number = {8},
   Pages = {2306-2318},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2019},
   Month = {August},
   url = {http://dx.doi.org/10.1109/TBME.2018.2887208},
   Abstract = {GOAL: In this paper, we propose methods for (1) automatic
             feature extraction and classification for acetic acid and
             Lugol's iodine cervigrams and (2) methods for combining
             features/diagnosis of different contrasts in cervigrams for
             improved performance. METHODS: We developed algorithms to
             pre-process pathology-labeled cervigrams and extract simple
             but powerful color and textural-based features. The features
             were used to train a support vector machine model to
             classify cervigrams based on corresponding pathology for
             visual inspection with acetic acid, visual inspection with
             Lugol's iodine, and a combination of the two contrasts.
             RESULTS: The proposed framework achieved a sensitivity,
             specificity, and accuracy of 81.3%, 78.6%, and 80.0%,
             respectively, when used to distinguish cervical
             intraepithelial neoplasia (CIN+) relative to normal and
             benign tissues. This is superior to the average values
             achieved by three expert physicians on the same data set for
             discriminating normal/benign cases from CIN+ (77%
             sensitivity, 51% specificity, and 63% accuracy). CONCLUSION:
             The results suggest that utilizing simple color- and
             textural-based features from visual inspection with acetic
             acid and visual inspection with Lugol's iodine images may
             provide unbiased automation of cervigrams. SIGNIFICANCE:
             This would enable automated, expert-level diagnosis of
             cervical pre-cancer at the point of care.},
   Doi = {10.1109/TBME.2018.2887208},
   Key = {fds340484}
}
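
A hedged sketch of the feature-plus-SVM pipeline outlined in the abstract; the color and texture statistics below are simple stand-ins for the paper's features, not its actual descriptors.

import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

def color_texture_features(img_rgb):
    """img_rgb: (H, W, 3) uint8 cervigram. Per-channel color statistics
    plus a crude texture proxy (mean local intensity change)."""
    x = img_rgb.astype(float) / 255.0
    color = np.concatenate([x.mean(axis=(0, 1)), x.std(axis=(0, 1))])
    gray = x.mean(axis=2)
    texture = np.array([np.abs(np.diff(gray, axis=0)).mean(),
                        np.abs(np.diff(gray, axis=1)).mean()])
    return np.concatenate([color, texture])

# features: (N, D) stacked per-image features; labels: 0 = normal/benign,
# 1 = CIN+.  A standard pipeline would then be:
# clf = make_pipeline(StandardScaler(), SVC(kernel='rbf', C=1.0))
# clf.fit(features, labels); preds = clf.predict(test_features)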

@article{fds349513,
   Author = {Cheng, X and Qiu, Q and Calderbank, R and Sapiro,
             G},
   Title = {RotDCF: Decomposition of convolutional filters for
             rotation-equivariant deep networks},
   Year = {2019},
   Month = {May},
   Abstract = {Explicit encoding of group actions in deep features makes it
             possible for convolutional neural networks (CNNs) to handle
             global deformations of images, which is critical to success
             in many vision tasks. This paper proposes to decompose the
             convolutional filters over joint steerable bases across the
             space and the group geometry simultaneously, namely a
             rotation-equivariant CNN with decomposed convolutional
             filters (RotDCF). This decomposition facilitates computing
             the joint convolution, which is proved to be necessary for
             the group equivariance. It significantly reduces the model
             size and computational complexity while preserving
             performance, and truncation of the bases expansion serves
             implicitly to regularize the filters. On datasets involving
             in-plane and out-of-plane object rotations, RotDCF deep
             features demonstrate greater robustness and interpretability
             than regular CNNs. The stability of the equivariant
             representation to input variations is also proved
             theoretically. The RotDCF framework can be extended to
             groups other than rotations, providing a general approach
             which achieves both group equivariance and representation
             stability at a reduced model size.},
   Key = {fds349513}
}
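
The core construction, convolutional filters expanded over a small fixed basis so that only expansion coefficients are learned, can be sketched as follows; the 2D DCT atoms here are an illustrative stand-in for the joint steerable bases of RotDCF.

import math
import torch
import torch.nn as nn
import torch.nn.functional as F

def dct_basis(k=3, num=5):
    """First `num` 2D DCT atoms of size k x k, unit-normalized."""
    n = torch.arange(k, dtype=torch.float32)
    rows = [torch.cos(math.pi * u * (2 * n + 1) / (2 * k)) for u in range(k)]
    atoms = torch.stack([torch.outer(a, b) for a in rows for b in rows])
    atoms = atoms[:num]
    return atoms / atoms.flatten(1).norm(dim=1).view(-1, 1, 1)

class DCFConv(nn.Module):
    """Conv layer with a fixed basis and learned expansion coefficients;
    truncating the basis both regularizes and shrinks the model."""
    def __init__(self, in_ch, out_ch, num_bases=5):
        super().__init__()
        self.register_buffer('basis', dct_basis(3, num_bases))  # fixed
        self.coeffs = nn.Parameter(torch.randn(out_ch, in_ch, num_bases) * 0.1)

    def forward(self, x):
        w = torch.einsum('oik,khw->oihw', self.coeffs, self.basis)
        return F.conv2d(x, w, padding=1)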

@article{fds341351,
   Author = {Dawson, G and Sapiro, G},
   Title = {Potential for Digital Behavioral Measurement Tools to
             Transform the Detection and Diagnosis of Autism Spectrum
             Disorder.},
   Journal = {JAMA Pediatr},
   Volume = {173},
   Number = {4},
   Pages = {305-306},
   Year = {2019},
   Month = {April},
   url = {http://dx.doi.org/10.1001/jamapediatrics.2018.5269},
   Doi = {10.1001/jamapediatrics.2018.5269},
   Key = {fds341351}
}

@article{fds335963,
   Author = {Campbell, K and Carpenter, KL and Hashemi, J and Espinosa, S and Marsan,
             S and Borg, JS and Chang, Z and Qiu, Q and Vermeer, S and Adler, E and Tepper,
             M and Egger, HL and Baker, JP and Sapiro, G and Dawson,
             G},
   Title = {Computer vision analysis captures atypical attention in
             toddlers with autism.},
   Journal = {Autism},
   Volume = {23},
   Number = {3},
   Pages = {619-628},
   Year = {2019},
   Month = {April},
   url = {http://dx.doi.org/10.1177/1362361318766247},
   Abstract = {To demonstrate the capability of computer vision analysis to
             detect atypical orienting and attention behaviors in
             toddlers with autism spectrum disorder. One hundered and
             four toddlers of 16-31 months old (mean = 22)
             participated in this study. Twenty-two of the toddlers had
             autism spectrum disorder and 82 had typical development or
             developmental delay. Toddlers watched video stimuli on a
             tablet while the built-in camera recorded their head
             movement. Computer vision analysis measured participants'
             attention and orienting in response to name calls.
             Reliability of the computer vision analysis algorithm was
             tested against a human rater. Differences in behavior were
             analyzed between the autism spectrum disorder group and the
             comparison group. Reliability between computer vision
             analysis and human coding for orienting to name was
             excellent (intra-class correlation coefficient 0.84, 95% confidence
             interval 0.67-0.91). Only 8% of toddlers with autism
             spectrum disorder oriented to name calling on >1 trial,
             compared to 63% of toddlers in the comparison group
             (p = 0.002). Mean latency to orient was significantly
             longer for toddlers with autism spectrum disorder (2.02 vs
             1.06 s, p = 0.04). Sensitivity for autism spectrum
             disorder of atypical orienting was 96% and specificity was
             38%. Older toddlers with autism spectrum disorder showed
             less attention to the videos overall (p = 0.03).
             Automated coding offers a reliable, quantitative method for
             detecting atypical social orienting and reduced sustained
             attention in toddlers with autism spectrum
             disorder.},
   Doi = {10.1177/1362361318766247},
   Key = {fds335963}
}

@article{fds341551,
   Author = {Shamir, RR and Duchin, Y and Kim, J and Patriat, R and Marmor, O and Bergman, H and Vitek, JL and Sapiro, G and Bick, A and Eliahou, R and Eitan, R and Israel, Z and Harel, N},
   Title = {Microelectrode Recordings Validate the Clinical
             Visualization of Subthalamic-Nucleus Based on 7T Magnetic
             Resonance Imaging and Machine Learning for Deep Brain
             Stimulation Surgery.},
   Journal = {Neurosurgery},
   Volume = {84},
   Number = {3},
   Pages = {749-757},
   Year = {2019},
   Month = {March},
   url = {http://dx.doi.org/10.1093/neuros/nyy212},
   Abstract = {BACKGROUND: Deep brain stimulation (DBS) of the
             subthalamic nucleus (STN) is a proven and effective therapy
             for the management of the motor symptoms of Parkinson's
             disease (PD). While accurate positioning of the stimulating
             electrode is critical for success of this therapy, precise
             identification of the STN based on imaging can be
             challenging. We developed a method to accurately visualize
             the STN on a standard clinical magnetic resonance imaging
             (MRI). The method incorporates a database of 7-Tesla (T)
             MRIs of PD patients together with machine-learning methods
             (hereafter 7 T-ML). OBJECTIVE: To validate the
             clinical application accuracy of the 7 T-ML method by
             comparing it with identification of the STN based on
             intraoperative microelectrode recordings. METHODS: Sixteen
             PD patients who underwent microelectrode-recordings guided
             STN DBS were included in this study (30 implanted leads and
             electrode trajectories). The length of the STN along the
             electrode trajectory and the position of its contacts to
             dorsal, inside, or ventral to the STN were compared using
             microelectrode-recordings and the 7 T-ML method computed
             based on the patient's clinical 3T MRI. RESULTS: All
             30 electrode trajectories that intersected the STN based on
             microelectrode-recordings, also intersected it when
             visualized with the 7 T-ML method. STN trajectory average
             length was 6.2 ± 0.7 mm based on microelectrode
             recordings and 5.8 ± 0.9 mm for the 7 T-ML method. We
             observed a 93% agreement regarding contact location between
             the microelectrode-recordings and the 7 T-ML
             method. CONCLUSION: The 7 T-ML method is highly
             consistent with microelectrode-recordings data. This method
             provides a reliable and accurate patient-specific prediction
             for targeting the STN.},
   Doi = {10.1093/neuros/nyy212},
   Key = {fds341551}
}

@article{fds346893,
   Author = {Sapiro, G and Hashemi, J and Dawson, G},
   Title = {Computer vision and behavioral phenotyping: an autism case
             study.},
   Journal = {Curr Opin Biomed Eng},
   Volume = {9},
   Pages = {14-20},
   Year = {2019},
   Month = {March},
   url = {http://dx.doi.org/10.1016/j.cobme.2018.12.002},
   Abstract = {Despite significant recent advances in molecular genetics
             and neuroscience, behavioral ratings based on clinical
             observations are still the gold standard for screening,
             diagnosing, and assessing outcomes in neurodevelopmental
             disorders, including autism spectrum disorder. Such
             behavioral ratings are subjective, require significant
             clinician expertise and training, typically do not capture
             data from the children in their natural environments such as
             homes or schools, and are not scalable for large population
             screening, low-income communities, or longitudinal
             monitoring, all of which are critical for outcome evaluation
             in multisite studies and for understanding and evaluating
             symptoms in the general population. The development of
             computational approaches to standardized objective
             behavioral assessment is, thus, a significant unmet need in
             autism spectrum disorder in particular and developmental and
             neurodegenerative disorders in general. Here, we discuss how
             computer vision and machine learning can be used to develop scalable
             low-cost mobile health methods for automatically and
             consistently assessing existing biomarkers, from eye
             tracking to movement patterns and affect, while also
             providing tools and big data for novel discovery.},
   Doi = {10.1016/j.cobme.2018.12.002},
   Key = {fds346893}
}

@article{fds339597,
   Author = {Kim, J and Duchin, Y and Shamir, RR and Patriat, R and Vitek, J and Harel,
             N and Sapiro, G},
   Title = {Automatic localization of the subthalamic nucleus on
             patient-specific clinical MRI by incorporating 7 T MRI and
             machine learning: Application in deep brain
             stimulation.},
   Journal = {Human brain mapping},
   Volume = {40},
   Number = {2},
   Pages = {679-698},
   Year = {2019},
   Month = {February},
   url = {http://dx.doi.org/10.1002/hbm.24404},
   Abstract = {Deep brain stimulation (DBS) of the subthalamic nucleus
             (STN) has shown clinical potential for relieving the motor
             symptoms of advanced Parkinson's disease. While accurate
             localization of the STN is critical for consistently
             effective DBS across patients, clear visualization of the
             STN under standard clinical MR protocols is still
             challenging. Therefore, intraoperative microelectrode
             recordings (MER) are incorporated to accurately localize the
             STN. However, MER require significant neurosurgical
             expertise and lengthen the surgery time. Recent advances in
             7 T MR technology facilitate the ability to clearly
             visualize the STN. The vast majority of centers, however,
             still do not have 7 T MRI systems, and fewer have the
             ability to collect and analyze the data. This work
             introduces an automatic STN localization framework based on
             standard clinical MRIs without additional cost in the
             current DBS planning protocol. Our approach benefits from a
             large database of 7 T MRI and its clinical MRI pairs. We
             first model in the 7 T database, using efficient machine
             learning algorithms, the spatial and geometric dependency
             between the STN and its adjacent structures (predictors).
             Given a standard clinical MRI, our method automatically
             computes the predictors and uses the learned information to
             predict the patient-specific STN. We validate our proposed
             method on clinical T2-weighted MRI of 80 subjects,
             comparing with experts-segmented STNs from the corresponding
             7 T MRI pairs. The experimental results show that our
             framework provides more accurate and robust patient-specific
             STN localization than using state-of-the-art atlases. We
             also demonstrate the clinical feasibility of the proposed
             technique assessing the post-operative electrode active
             contact locations.},
   Doi = {10.1002/hbm.24404},
   Key = {fds339597}
}

@article{fds343752,
   Author = {Azami, H and Arnold, SE and Sanei, S and Chang, Z and Sapiro, G and Escudero, J and Gupta, AS},
   Title = {Multiscale fluctuation-based dispersion entropy and its
             applications to neurological diseases},
   Journal = {IEEE Access},
   Volume = {7},
   Pages = {68718-68733},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1109/ACCESS.2019.2918560},
   Abstract = {Fluctuation-based dispersion entropy (FDispEn) is a new
             approach to estimate the dynamical variability of the
             fluctuations of signals. It is based on Shannon entropy and
             fluctuation-based dispersion patterns. To quantify the
             physiological dynamics over multiple time scales, multiscale
             FDispEn (MFDE) is developed in this paper. MFDE is robust to
             the presence of baseline wanders or trends in the data. We
             evaluate MFDE, compared with popular multiscale sample
             entropy (MSE), multiscale fuzzy entropy (MFE), and the
             recently introduced multiscale dispersion entropy (MDE), on
             selected synthetic data and five neurological diseases'
             datasets: 1) focal and non-focal electroencephalograms
             (EEGs); 2) walking stride interval signals for young,
             elderly, and Parkinson's subjects; 3) stride interval
             fluctuations for Huntington's disease and amyotrophic
             lateral sclerosis; 4) EEGs for controls and Alzheimer's
             disease patients; and 5) eye movement data for Parkinson's
             disease and ataxia. The MFDE avoids the problem of the
             undefined MSE values and, compared with the MFE and MSE,
             leads to more stable entropy values over the scale factors
             for white and pink noises. Overall, the MFDE is the fastest
             and most consistent method for the discrimination of
             different states of neurological data, especially where the
             mean value of a time series considerably changes along with
             the signal (e.g., eye movement data). This paper shows that
             MFDE is a relevant new metric to gain further insights into
             the dynamics of neurological diseases' recordings. The
             MATLAB codes for the MFDE and its refined composite form are
             available on IEEE Xplore.},
   Doi = {10.1109/ACCESS.2019.2918560},
   Key = {fds343752}
}
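
A compact sketch of multiscale fluctuation-based dispersion entropy following the abstract's description; the embedding dimension, number of classes, and coarse-graining are assumed parameter choices, not the released MATLAB code.

import numpy as np
from scipy.stats import norm

def fdisp_entropy(x, m=2, c=5):
    """Fluctuation-based dispersion entropy of a 1D signal x."""
    # Map samples to classes 1..c via the normal CDF of the z-scored signal.
    z = norm.cdf((x - x.mean()) / (x.std() + 1e-12))
    classes = np.clip(np.ceil(z * c), 1, c).astype(int)
    # Embed, then take first differences: fluctuation-based patterns.
    emb = np.lib.stride_tricks.sliding_window_view(classes, m + 1)
    patterns = np.diff(emb, axis=1)          # values in [-(c-1), c-1]
    _, counts = np.unique(patterns, axis=0, return_counts=True)
    p = counts / counts.sum()
    return -(p * np.log(p)).sum()            # Shannon entropy of patterns

def mfde(x, max_scale=10, m=2, c=5):
    """Coarse-grain the signal at each scale, then apply FDispEn."""
    vals = []
    for s in range(1, max_scale + 1):
        n = len(x) // s
        coarse = x[:n * s].reshape(n, s).mean(axis=1)
        vals.append(fdisp_entropy(coarse, m, c))
    return np.array(vals)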

@article{fds345821,
   Author = {Cheng, X and Qiu, Q and Calderbank, R and Sapiro,
             G},
   Title = {RoTDCF: Decomposition of convolutional filters for
             rotation-equivariant deep networks},
   Journal = {7th International Conference on Learning Representations,
             ICLR 2019},
   Year = {2019},
   Month = {January},
   Abstract = {Explicit encoding of group
             actions in deep features makes it possible for convolutional
             neural networks (CNNs) to handle global deformations of
             images, which is critical to success in many vision tasks.
             This paper proposes to decompose the convolutional filters
             over joint steerable bases across the space and the group
             geometry simultaneously, namely a rotation-equivariant CNN
             with decomposed convolutional filters (RotDCF). This
             decomposition facilitates computing the joint convolution,
             which is proved to be necessary for the group equivariance.
             It significantly reduces the model size and computational
             complexity while preserving performance, and truncation of
             the bases expansion serves implicitly to regularize the
             filters. On datasets involving in-plane and out-of-plane
             object rotations, RotDCF deep features demonstrate greater
             robustness and interpretability than regular CNNs. The
             stability of the equivariant representation to input
             variations is also proved theoretically. The RotDCF
             framework can be extended to groups other than rotations,
             providing a general approach which achieves both group
             equivariance and representation stability at a reduced model
             size.},
   Key = {fds345821}
}

@article{fds348084,
   Author = {Fellous, J-M and Sapiro, G and Rossi, A and Mayberg, H and Ferrante,
             M},
   Title = {Explainable Artificial Intelligence for Neuroscience:
             Behavioral Neurostimulation.},
   Journal = {Frontiers in neuroscience},
   Volume = {13},
   Pages = {1346},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.3389/fnins.2019.01346},
   Abstract = {The use of Artificial Intelligence and machine learning in
             basic research and clinical neuroscience is increasing. AI
             methods enable the interpretation of large multimodal
             datasets that can provide unbiased insights into the
             fundamental principles of brain function, potentially paving
             the way for earlier and more accurate detection of brain
             disorders and better informed intervention protocols.
             Despite AI's ability to create accurate predictions and
             classifications, in most cases it lacks the ability to
             provide a mechanistic understanding of how inputs and
             outputs relate to each other. Explainable Artificial
             Intelligence (XAI) is a new set of techniques that attempts
             to provide such an understanding; here we report on some of
             these practical approaches. We discuss the potential value
             of XAI to the field of neurostimulation for both basic
             scientific inquiry and therapeutic purposes, as well as
             outstanding questions and obstacles to the success of the
             XAI approach.},
   Doi = {10.3389/fnins.2019.01346},
   Key = {fds348084}
}

@article{fds348434,
   Author = {Bertran, M and Martinez, N and Papadaki, A and Qiu, Q and Rodrigues, M and Reeves, G and Sapiro, G},
   Title = {Adversarially learned representations for information
             obfuscation and inference},
   Journal = {36th International Conference on Machine Learning, ICML
             2019},
   Volume = {2019-June},
   Pages = {960-974},
   Year = {2019},
   Month = {January},
   ISBN = {9781510886988},
   Abstract = {Data collection and sharing are pervasive aspects of modern
             society. This process can either be voluntary, as in the
             case of a person taking a facial image to unlock his/her
             phone, or incidental, such as traffic cameras collecting
             videos of pedestrians. An undesirable side effect of these
             processes is that shared data can carry information about
             attributes that users might consider as sensitive, even when
             such information is of limited use for the task. It is
             therefore desirable for both data collectors and users to
             design procedures that minimize sensitive information
             leakage. Balancing the competing objectives of providing
             meaningful individualized service levels and inference while
             obfuscating sensitive information is still an open problem.
             In this work, we take an information theoretic approach that
             is implemented as an unconstrained adversarial game between
             Deep Neural Networks in a principled, data-driven manner.
             This approach enables us to learn domain-preserving
             stochastic transformations that maintain performance on
             existing algorithms while minimizing sensitive information
             leakage.},
   Key = {fds348434}
}
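
A minimal sketch of the adversarial game the abstract describes: an encoder is trained to keep the service task solvable while an adversary that tries to recover the sensitive attribute is trained against it. The architectures and tradeoff weight are illustrative assumptions.

import torch
import torch.nn as nn

enc = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 32))
task = nn.Linear(32, 10)          # utility head (service task)
adv = nn.Linear(32, 2)            # adversary: sensitive-attribute head
opt_main = torch.optim.Adam([*enc.parameters(), *task.parameters()], lr=1e-3)
opt_adv = torch.optim.Adam(adv.parameters(), lr=1e-3)
ce = nn.CrossEntropyLoss()
lam = 1.0                         # privacy/utility tradeoff weight

def train_step(x, y_task, y_sensitive):
    # 1) Adversary learns to infer the sensitive attribute from the code.
    z = enc(x).detach()
    loss_adv = ce(adv(z), y_sensitive)
    opt_adv.zero_grad(); loss_adv.backward(); opt_adv.step()
    # 2) Encoder keeps the task solvable while fooling the adversary.
    z = enc(x)
    loss = ce(task(z), y_task) - lam * ce(adv(z), y_sensitive)
    opt_main.zero_grad(); loss.backward(); opt_main.step()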

@article{fds342170,
   Author = {Lezama, J and Qiu, Q and Musé, P and Sapiro, G},
   Title = {OLE: Orthogonal Low-rank Embedding, A Plug and Play
             Geometric Loss for Deep Learning},
   Journal = {Proceedings of the IEEE Computer Society Conference on
             Computer Vision and Pattern Recognition},
   Pages = {8109-8118},
   Year = {2018},
   Month = {December},
   url = {http://dx.doi.org/10.1109/CVPR.2018.00846},
   Abstract = {Deep neural networks trained using a softmax layer at the
             top and the cross-entropy loss are ubiquitous tools for
             image classification. Yet, this does not naturally enforce
             intra-class similarity nor inter-class margin of the learned
             deep representations. To simultaneously achieve these two
             goals, different solutions have been proposed in the
             literature, such as the pairwise or triplet losses. However,
             these carry the extra task of selecting pairs or triplets,
             and the extra computational burden of computing and learning
             for many combinations of them. In this paper, we propose a
             plug-and-play loss term for deep networks that explicitly
             reduces intra-class variance and enforces inter-class margin
             simultaneously, in a simple and elegant geometric manner.
             For each class, the deep features are collapsed into a
             learned linear subspace, or union of them, and inter-class
             subspaces are pushed to be as orthogonal as possible. Our
             proposed Orthogonal Low-rank Embedding (OLÉ) does not
             require carefully crafting pairs or triplets of samples for
             training, and works standalone as a classification loss,
             being the first reported deep metric learning framework of
             its kind. Because of the improved margin between features of
             different classes, the resulting deep networks generalize
             better, are more discriminative, and more robust. We
             demonstrate improved classification performance in general
             object recognition, plugging the proposed loss term into
             existing off-the-shelf architectures. In particular, we show
             the advantage of the proposed loss in the small data/model
             scenario, and we significantly advance the state-of-the-art
             on the Stanford STL-10 benchmark.},
   Doi = {10.1109/CVPR.2018.00846},
   Key = {fds342170}
}
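
The geometric loss the abstract describes can be sketched as a difference of nuclear norms: per-class feature matrices are pushed toward low-rank subspaces while the batch as a whole keeps high rank. The floor value delta is an assumed stabilizer, and this is a reading of the abstract rather than the authors' code.

import torch

def ole_loss(features, labels, delta=1.0):
    """features: (N, D) deep features; labels: (N,) integer classes.
    Sum of per-class nuclear norms minus the nuclear norm of the batch."""
    intra = features.new_zeros(())
    for c in labels.unique():
        Xc = features[labels == c]
        intra = intra + torch.linalg.matrix_norm(Xc, ord='nuc').clamp_min(delta)
    inter = torch.linalg.matrix_norm(features, ord='nuc')
    return intra - inter   # add to cross-entropy as a plug-and-play term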

@article{fds342169,
   Author = {Zhu, W and Qiu, Q and Huang, J and Calderbank, R and Sapiro, G and Daubechies, I},
   Title = {LDMNet: Low Dimensional Manifold Regularized Neural
             Networks},
   Journal = {Proceedings of the IEEE Computer Society Conference on
             Computer Vision and Pattern Recognition},
   Pages = {2743-2751},
   Year = {2018},
   Month = {December},
   url = {http://dx.doi.org/10.1109/CVPR.2018.00290},
   Abstract = {Deep neural networks have proved very successful on
             archetypal tasks for which large training sets are
             available, but when the training data are scarce, their
             performance suffers from overfitting. Many existing methods
             of reducing overfitting are data-independent. Data-dependent
             regularizations are mostly motivated by the observation that
             data of interest lie close to a manifold, which is typically
             hard to parametrize explicitly. These methods usually only
             focus on the geometry of the input data, and do not
             necessarily encourage the networks to produce geometrically
             meaningful features. To resolve this, we propose the
             Low-Dimensional-Manifold-regularized neural Network
             (LDMNet), which incorporates a feature regularization method
             that focuses on the geometry of both the input data and the
             output features. In LDMNet, we regularize the network by
             encouraging the combination of the input data and the output
             features to sample a collection of low dimensional
             manifolds, which are searched efficiently without explicit
             parametrization. To achieve this, we directly use the
             manifold dimension as a regularization term in a variational
             functional. The resulting Euler-Lagrange equation is a
             Laplace-Beltrami equation over a point cloud, which is
             solved by the point integral method without increasing the
             computational complexity. In the experiments, we show that
             LDMNet significantly outperforms widely-used regularizers.
             Moreover, LDMNet can extract common features of an object
             imaged via different modalities, which is very useful in
             real-world applications such as cross-spectral face
             recognition.},
   Doi = {10.1109/CVPR.2018.00290},
   Key = {fds342169}
}

@article{fds339768,
   Author = {Dawson, G and Campbell, K and Hashemi, J and Lippmann, SJ and Smith, V and Carpenter, K and Egger, H and Espinosa, S and Vermeer, S and Baker, J and Sapiro, G},
   Title = {Atypical postural control can be detected via computer
             vision analysis in toddlers with autism spectrum
             disorder.},
   Journal = {Sci Rep},
   Volume = {8},
   Number = {1},
   Pages = {17008},
   Year = {2018},
   Month = {November},
   url = {http://dx.doi.org/10.1038/s41598-018-35215-8},
   Abstract = {Evidence suggests that differences in motor function are an
             early feature of autism spectrum disorder (ASD). One aspect
             of motor ability that develops during childhood is postural
             control, reflected in the ability to maintain a steady head
             and body position without excessive sway. Observational
             studies have documented differences in postural control in
             older children with ASD. The present study used computer
             vision analysis to assess midline head postural control, as
             reflected in the rate of spontaneous head movements during
             states of active attention, in 104 toddlers between 16-31
             months of age (Mean = 22 months), 22 of whom were
             diagnosed with ASD. Time-series data revealed robust group
             differences in the rate of head movements while the toddlers
             watched movies depicting social and nonsocial stimuli.
             Toddlers with ASD exhibited a significantly higher rate of
             head movement as compared to non-ASD toddlers, suggesting
             difficulties in maintaining midline position of the head
             while engaging attentional systems. The use of digital
             phenotyping approaches, such as computer vision analysis, to
             quantify variation in early motor behaviors will allow for
             more precise, objective, and quantitative characterization
             of early motor signatures and potentially provide new
             automated methods for early autism risk identification.},
   Doi = {10.1038/s41598-018-35215-8},
   Key = {fds339768}
}

@article{fds339259,
   Author = {Aguerrebere, C and Delbracio, M and Bartesaghi, A and Sapiro,
             G},
   Title = {A Practical Guide to Multi-Image Alignment},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Volume = {2018-April},
   Pages = {1927-1931},
   Publisher = {IEEE},
   Year = {2018},
   Month = {September},
   url = {http://dx.doi.org/10.1109/ICASSP.2018.8461588},
   Abstract = {Multi-image alignment, bringing a group of images into
             common register, is a ubiquitous problem and the first step
             of many applications in a wide variety of domains. As a
             result, a great amount of effort is being invested in
             developing efficient multi-image alignment algorithms.
             Little has been done, however, to answer fundamental
             practical questions such as: what is the comparative
             performance of existing methods? is there still room for
             improvement? under which conditions should one technique be
             preferred over another? does adding more images or prior
             image information improve the registration results? In this
             work, we present a thorough analysis and evaluation of the
             main multi-image alignment methods which, combined with
             theoretical limits in multi-image alignment performance,
             allows us to organize them under a common framework and
             provide practical answers to these essential
             questions.},
   Doi = {10.1109/ICASSP.2018.8461588},
   Key = {fds339259}
}

@article{fds339260,
   Author = {Ahn, HK and Qiu, Q and Bosch, E and Thompson, A and Robles, FE and Sapiro,
             G and Warren, WS and Calderbank, R},
   Title = {Classifying Pump-Probe Images of Melanocytic Lesions Using
             the WEYL Transform},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Volume = {2018-April},
   Pages = {4209-4213},
   Publisher = {IEEE},
   Year = {2018},
   Month = {September},
   ISBN = {9781538646588},
   url = {http://dx.doi.org/10.1109/ICASSP.2018.8461298},
   Abstract = {Diagnosis of melanoma is fraught with uncertainty, and
             discordance rates among physicians remain high because of
             the lack of a definitive criterion. Motivated by this
             challenge, this paper first introduces the Patch Weyl
             transform (PWT), a 2-dimensional variant of the Weyl
             transform. It then presents a method for classifying
             pump-probe images of melanocytic lesions based on the PWT
             coefficients. Performance of the PWT coefficients is shown
             to be superior to classification based on baseline
             intensity, on standard descriptors such as the Histogram of
             Oriented Gradients (HOG) and Local Binary Patterns (LBP),
             and on coefficients derived from PCA and Fourier
             representations of the data.},
   Doi = {10.1109/ICASSP.2018.8461298},
   Key = {fds339260}
}

@article{fds339261,
   Author = {Giryes, R and Eldar, YC and Bronstein, AM and Sapiro,
             G},
   Title = {The Learned Inexact Projected Gradient Descent
             Algorithm},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Volume = {2018-April},
   Pages = {6767-6771},
   Publisher = {IEEE},
   Year = {2018},
   Month = {September},
   ISBN = {9781538646588},
   url = {http://dx.doi.org/10.1109/ICASSP.2018.8462136},
   Abstract = {Accelerating iterative algorithms for solving inverse
             problems using neural networks has become a very popular
             strategy in recent years. In this work, we propose a
             theoretical analysis that may provide an explanation for its
             success. Our theory relies on the usage of inexact
             projections with the projected gradient descent (PGD)
             method. It is demonstrated in various problems including
             image super-resolution.},
   Doi = {10.1109/ICASSP.2018.8462136},
   Key = {fds339261}
}
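
A short sketch of projected gradient descent with an inexact projection, the object of the paper's analysis; the projector argument is a placeholder for whatever operator or trained network approximates projection onto the signal set.

import torch

def learned_pgd(A, y, projector, steps=20, lr=0.5):
    """Approximately solves min_x 0.5 * ||A x - y||^2 subject to x lying
    in a signal set reached through the (possibly inexact) projector."""
    x = torch.zeros(A.shape[1])
    for _ in range(steps):
        grad = A.T @ (A @ x - y)         # gradient of the data term
        x = projector(x - lr * grad)     # inexact projection step
    return x

# e.g. projector = lambda v: v.clamp(0, 1)   # exact projection onto a box,
# or a trained denoiser standing in for projection onto the signal set.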

@article{fds335962,
   Author = {Bartesaghi, A and Aguerrebere, C and Falconieri, V and Banerjee, S and Earl, LA and Zhu, X and Grigorieff, N and Milne, JLS and Sapiro, G and Wu,
             X and Subramaniam, S},
   Title = {Atomic Resolution Cryo-EM Structure of β-Galactosidase.},
   Journal = {Structure (London, England : 1993)},
   Volume = {26},
   Number = {6},
   Pages = {848-856.e3},
   Year = {2018},
   Month = {June},
   url = {http://dx.doi.org/10.1016/j.str.2018.04.004},
   Abstract = {The advent of direct electron detectors has enabled the
             routine use of single-particle cryo-electron microscopy (EM)
             approaches to determine structures of a variety of protein
             complexes at near-atomic resolution. Here, we report the
             development of methods to account for local variations in
             defocus and beam-induced drift, and the implementation of a
             data-driven dose compensation scheme that significantly
             improves the extraction of high-resolution information
             recorded during exposure of the specimen to the electron
             beam. These advances enable determination of a cryo-EM
             density map for β-galactosidase bound to the inhibitor
             phenylethyl β-D-thiogalactopyranoside where the ordered
             regions are resolved at a level of detail seen in X-ray maps
             at ∼ 1.5 Å resolution. Using this density map in
             conjunction with constrained molecular dynamics simulations
             provides a measure of the local flexibility of the
             non-covalently bound inhibitor and offers further
             opportunities for structure-guided inhibitor
             design.},
   Doi = {10.1016/j.str.2018.04.004},
   Key = {fds335962}
}

@article{fds332366,
   Author = {Giryes, R and Eldar, YC and Bronstein, AM and Sapiro,
             G},
   Title = {Tradeoffs between convergence speed and reconstruction
             accuracy in inverse problems},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {66},
   Number = {7},
   Pages = {1676-1690},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2018},
   Month = {April},
   url = {http://dx.doi.org/10.1109/TSP.2018.2791945},
   Abstract = {Solving inverse problems with iterative algorithms is
             popular, especially for large data. Due to time constraints,
             the number of possible iterations is usually limited,
             potentially affecting the achievable accuracy. Given an
             error one is willing to tolerate, an important question is
             whether it is possible to modify the original iterations to
             obtain faster convergence to a minimizer achieving the
             allowed error without increasing the computational cost of
             each iteration considerably. Relying on recent recovery
             techniques developed for settings in which the desired
             signal belongs to some low-dimensional set, we show that
             using a coarse estimate of this set may lead to faster
             convergence at the cost of an additional reconstruction
             error related to the accuracy of the set approximation. Our
             theory ties to recent advances in sparse recovery,
             compressed sensing, and deep learning. Particularly, it may
             provide a possible explanation for the successful
             approximation of the ℓ1-minimization solution by neural
             networks with layers representing iterations, as practiced
             in the learned iterative shrinkage-thresholding
             algorithm.},
   Doi = {10.1109/TSP.2018.2791945},
   Key = {fds332366}
}

@article{fds332805,
   Author = {Vu, M-AT and Adalı, T and Ba, D and Buzsáki, G and Carlson, D and Heller,
             K and Liston, C and Rudin, C and Sohal, VS and Widge, AS and Mayberg, HS and Sapiro, G and Dzirasa, K},
   Title = {A Shared Vision for Machine Learning in Neuroscience.},
   Journal = {J Neurosci},
   Volume = {38},
   Number = {7},
   Pages = {1601-1607},
   Publisher = {Society for Neuroscience},
   Year = {2018},
   Month = {February},
   url = {http://dx.doi.org/10.1523/JNEUROSCI.0508-17.2018},
   Abstract = {With ever-increasing advancements in technology,
             neuroscientists are able to collect data in greater volumes
             and with finer resolution. The bottleneck in understanding
             how the brain works is consequently shifting away from the
             amount and type of data we can collect and toward what we
             actually do with the data. There has been a growing interest
             in leveraging this vast volume of data across levels of
             analysis, measurement techniques, and experimental paradigms
             to gain more insight into brain function. Such efforts are
             visible at an international scale, with the emergence of big
             data neuroscience initiatives, such as the BRAIN initiative
             (Bargmann et al., 2014), the Human Brain Project, the Human
             Connectome Project, and the National Institute of Mental
             Health's Research Domain Criteria initiative. With these
             large-scale projects, much thought has been given to
             data-sharing across groups (Poldrack and Gorgolewski, 2014;
             Sejnowski et al., 2014); however, even with such
             data-sharing initiatives, funding mechanisms, and
             infrastructure, there still exists the challenge of how to
             cohesively integrate all the data. At multiple stages and
             levels of neuroscience investigation, machine learning holds
             great promise as an addition to the arsenal of analysis
             tools for discovering how the brain works.},
   Doi = {10.1523/JNEUROSCI.0508-17.2018},
   Key = {fds332805}
}

@article{fds327666,
   Author = {Pisharady, PK and Sotiropoulos, SN and Duarte-Carvajalino, JM and Sapiro, G and Lenglet, C},
   Title = {Estimation of white matter fiber parameters from compressed
             multiresolution diffusion MRI using sparse Bayesian
             learning.},
   Journal = {NeuroImage},
   Volume = {167},
   Pages = {488-503},
   Year = {2018},
   Month = {February},
   url = {http://dx.doi.org/10.1016/j.neuroimage.2017.06.052},
   Abstract = {We present a sparse Bayesian unmixing algorithm BusineX:
             Bayesian Unmixing for Sparse Inference-based Estimation of
             Fiber Crossings (X), for estimation of white matter fiber
             parameters from compressed (under-sampled) diffusion MRI
             (dMRI) data. BusineX combines compressive sensing with
             linear unmixing and introduces sparsity to the previously
             proposed multiresolution data fusion algorithm RubiX,
             resulting in a method for improved reconstruction,
             especially from data with lower number of diffusion
             gradients. We formulate the estimation of fiber parameters
             as a sparse signal recovery problem and propose a linear
             unmixing framework with sparse Bayesian learning for the
             recovery of sparse signals, the fiber orientations and
             volume fractions. The data is modeled using a parametric
             spherical deconvolution approach and represented using a
             dictionary created with the exponential decay components
             along different possible diffusion directions. Volume
             fractions of fibers along these directions define the
             dictionary weights. The proposed sparse inference, which is
             based on the dictionary representation, considers the
             sparsity of fiber populations and exploits the spatial
             redundancy in data representation, thereby facilitating
             inference from under-sampled q-space. The algorithm improves
             parameter estimation from dMRI through data-dependent local
             learning of hyperparameters, at each voxel and for each
             possible fiber orientation, that moderate the strength of
             priors governing the parameter variances. Experimental
             results on synthetic and in-vivo data show improved accuracy
             with a lower uncertainty in fiber parameter estimates.
             BusineX resolves a higher number of second and third fiber
             crossings. For under-sampled data, the algorithm is also
             shown to produce more reliable estimates.},
   Doi = {10.1016/j.neuroimage.2017.06.052},
   Key = {fds327666}
}

@article{fds339262,
   Author = {Simhal, AK and Gong, B and Trimmer, JS and Weinberg, RJ and Smith, SJ and Sapiro, G and Micheva, KD},
   Title = {A Computational Synaptic Antibody Characterization Tool for
             Array Tomography.},
   Journal = {Frontiers in neuroanatomy},
   Volume = {12},
   Pages = {51},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.3389/fnana.2018.00051},
   Abstract = {Application-specific validation of antibodies is a critical
             prerequisite for their successful use. Here we introduce an
             automated framework for characterization and screening of
             antibodies against synaptic molecules for high-resolution
             immunofluorescence array tomography (AT). The proposed
             Synaptic Antibody Characterization Tool (SACT) is designed
             to provide an automatic, robust, flexible, and efficient
             tool for antibody characterization at scale. SACT
             automatically detects puncta of immunofluorescence labeling
             from candidate antibodies and determines whether a punctum
             belongs to a synapse. The molecular composition and size of
             the target synapses expected to contain the antigen is
             determined by the user, based on biological knowledge.
             Operationally, the presence of a synapse is defined by the
             colocalization or adjacency of the candidate antibody
             punctum to one or more reference antibody puncta. The
             outputs of SACT are automatically computed measurements such
             as target synapse density and target specificity ratio that
             reflect the sensitivity and specificity of immunolabeling
             with a given candidate antibody. These measurements provide
             an objective way to characterize and compare the performance
             of different antibodies against the same target, and can be
             used to objectively select the antibodies best suited for AT
             and potentially for other immunolabeling
             applications.},
   Doi = {10.3389/fnana.2018.00051},
   Key = {fds339262}
}
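
An illustrative sketch of the colocalization/adjacency rule in the abstract's definition of a synapse; the radius and centroid inputs are assumptions for illustration only.

import numpy as np
from scipy.spatial import cKDTree

def target_specificity(candidate_xyz, reference_xyz, radius=0.5):
    """candidate_xyz, reference_xyz: (N, 3) punctum centroids in microns.
    Returns the fraction of candidate puncta lying within `radius` of a
    reference punctum, a proxy for the target specificity ratio."""
    tree = cKDTree(reference_xyz)
    dists, _ = tree.query(candidate_xyz, k=1)
    return float((dists <= radius).mean())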

@article{fds335968,
   Author = {Bertrán, MA and Martínez, NL and Wang, Y and Dunson, D and Sapiro, G and Ringach, D},
   Title = {Active learning of cortical connectivity from two-photon
             imaging data.},
   Journal = {PloS one},
   Volume = {13},
   Number = {5},
   Pages = {e0196527},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.1371/journal.pone.0196527},
   Abstract = {Understanding how groups of neurons interact within a
             network is a fundamental question in systems neuroscience.
             Instead of passively observing the ongoing activity of a
             network, we can typically perturb its activity, either by
             external sensory stimulation or directly via techniques such
             as two-photon optogenetics. A natural question is how to use
             such perturbations to identify the connectivity of the
             network efficiently. Here we introduce a method to infer
             sparse connectivity graphs from in-vivo, two-photon imaging
             of population activity in response to external stimuli. A
             novel aspect of the work is the introduction of a
             recommended distribution, incrementally learned from the
             data, to optimally refine the inferred network. Unlike
             existing system identification techniques, this "active
             learning" method automatically focuses its attention on key
             undiscovered areas of the network, instead of targeting
             global uncertainty indicators like parameter variance. We
             show how active learning leads to faster inference while, at
             the same time, provides confidence intervals for the network
             parameters. We present simulations on artificial small-world
             networks to validate the method and then apply it to
             real data. Analysis of the frequency of recovered motifs shows
             that cortical networks are consistent with a small-world
             topology model.},
   Doi = {10.1371/journal.pone.0196527},
   Key = {fds335968}
}

@article{fds335966,
   Author = {Asiedu, MN and Simhal, A and Lam, CT and Mueller, J and Chaudhary, U and Schmitt, JW and Sapiro, G and Ramanujam, N},
   Title = {Image processing and machine learning techniques to automate
             diagnosis of Lugol's iodine cervigrams for a low-cost
             point-of-care digital colposcope},
   Journal = {Progress in Biomedical Optics and Imaging - Proceedings of
             SPIE},
   Volume = {10485},
   Publisher = {SPIE},
   Year = {2018},
   Month = {January},
   ISBN = {9781510614550},
   url = {http://dx.doi.org/10.1117/12.2282792},
   Abstract = {The World Health Organization recommends visual inspection
             with acetic acid (VIA) and/or Lugol's Iodine (VILI) for
             cervical cancer screening in low-resource settings. Human
             interpretation of diagnostic indicators for visual
             inspection is qualitative, subjective, and has high
             inter-observer discordance, which could lead both to adverse
             outcomes for the patient and unnecessary follow-ups. In this
             work, we present a simple method for automatic feature extraction
             and classification for Lugol's Iodine cervigrams acquired
             with a low-cost, miniature, digital colposcope. Algorithms
             to preprocess expert physician-labelled cervigrams and to
             extract simple but powerful color-based features are
             introduced. The features are used to train a support vector
             machine model to classify cervigrams based on expert
             physician labels. The selected framework achieved a
             sensitivity, specificity, and accuracy of 89.2%, 66.7% and
             80.6% against the majority diagnosis of the expert physicians in
             discriminating cervical intraepithelial neoplasia (CIN +)
             relative to normal tissues. The proposed classifier also
             achieved an area under the curve of 0.84 when trained with
             majority diagnosis of the expert physicians. The results
             suggest that utilizing simple color-based features may
             enable unbiased automation of VILI cervigrams, opening the
             door to a full system of low-cost data acquisition
             complemented with automatic interpretation.},
   Doi = {10.1117/12.2282792},
   Key = {fds335966}
}

@article{fds337693,
   Author = {Duchin, Y and Shamir, RR and Patriat, R and Kim, J and Vitek, JL and Sapiro, G and Harel, N},
   Title = {Patient-specific anatomical model for deep brain stimulation
             based on 7 Tesla MRI.},
   Journal = {PloS one},
   Volume = {13},
   Number = {8},
   Pages = {e0201469},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.1371/journal.pone.0201469},
   Abstract = {OBJECTIVE: Deep brain stimulation (DBS) requires
             accurate localization of the anatomical target structure,
             and the precise placement of the DBS electrode within it.
             Ultra-high field 7 Tesla (T) MR images can be utilized to
             create patient-specific anatomical 3D models of the
             subthalamic nuclei (STN) to enhance pre-surgical DBS
             targeting as well as post-surgical visualization of the DBS
             lead position and orientation. We validated the accuracy of
             the 7T imaging-based patient-specific model of the STN and
             measured the variability of the location and dimensions
             across movement disorder patients. METHODS: 72
             patients who underwent DBS surgery were scanned
             preoperatively on 7T MRI. Segmentations and 3D volume
             rendering of the STN were generated for all patients. For 21
             STN-DBS cases, microelectrode recording (MER) was used to
             validate the segmentation. For 12 cases, we computed the
             correlation between the overlap of the STN and volume of
             tissue activated (VTA) and the monopolar review for a
             further validation of the model's accuracy and its clinical
             relevancy.<h4>Results</h4>We successfully reconstructed and
             visualized the STN in all patients. Significant variability
             was found across individuals regarding the location of the
             STN center of mass as well as its volume, length, depth and
             width. Significant correlations were found between MER and
             the 7T imaging-based model of the STN (r = 0.86) and VTA-STN
             overlap and the monopolar review outcome (r =
             0.61).<h4>Conclusion</h4>The results suggest that an
             accurate visualization and localization of a
             patient-specific 3D model of the STN can be generated based
             on 7T MRI. The imaging-based 7T MRI STN model was validated
             using MER and patient's clinical outcomes. The significant
             variability observed in the STN location and shape based on
             a large number of patients emphasizes the importance of an
             accurate direct visualization of the STN for DBS targeting.
             An accurate STN localization can facilitate postoperative
             stimulation parameters for optimized patient
             outcome.},
   Doi = {10.1371/journal.pone.0201469},
   Key = {fds337693}
}

@article{fds339596,
   Author = {Qiu, Q and Lezama, J and Bronstein, A and Sapiro,
             G},
   Title = {ForestHash: Semantic Hashing with Shallow Random Forests and
             Tiny Convolutional Networks},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {11206 LNCS},
   Pages = {442-459},
   Publisher = {Springer International Publishing},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.1007/978-3-030-01216-8_27},
   Abstract = {In this paper, we introduce a random forest semantic hashing
             scheme that embeds tiny convolutional neural networks (CNN)
             into shallow random forests. A binary hash code for a data
             point is obtained by a set of decision trees, setting
             ‘1’ for the visited tree leaf, and ‘0’ for the rest.
             We propose to first randomly group arriving classes at each
             tree split node into two groups, obtaining a significantly
             simplified two-class classification problem that can be
             handled with a light-weight CNN weak learner. Code
             uniqueness is achieved via the random class grouping, whilst
             code consistency is achieved using a low-rank loss in the
             CNN weak learners that encourages intra-class compactness
             for the two random class groups. Finally, we introduce an
             information-theoretic approach for aggregating codes of
             individual trees into a single hash code, producing a
             near-optimal unique hash for each class. The proposed
             approach significantly outperforms state-of-the-art hashing
             methods for image retrieval tasks on large-scale public
             datasets, and is comparable to image classification methods
             while utilizing a more compact, efficient and scalable
             representation. This work proposes a principled and robust
             procedure to train and deploy in parallel an ensemble of
             light-weight CNNs, instead of simply going
             deeper.},
   Doi = {10.1007/978-3-030-01216-8_27},
   Key = {fds339596}
}
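
   Note: The leaf-indicator hashing idea described above can be sketched in
   a few lines. This minimal Python example keeps the random binary
   grouping of classes at each tree but substitutes plain decision trees
   for the paper's tiny-CNN weak learners, so it illustrates only the code
   construction, not the full method.

       import numpy as np
       from sklearn.tree import DecisionTreeClassifier

       def forest_hash(X_train, y_train, X_query, n_trees=4, depth=3, seed=0):
           rng = np.random.default_rng(seed)
           classes = np.unique(y_train)
           codes = []
           for _ in range(n_trees):
               # Randomly split the classes into two groups -> a simplified
               # two-class problem for this tree.
               grouping = rng.integers(0, 2, size=classes.size)
               y_bin = grouping[np.searchsorted(classes, y_train)]
               tree = DecisionTreeClassifier(max_depth=depth, random_state=0)
               tree.fit(X_train, y_bin)
               # Hash bits: '1' at the visited leaf, '0' for the rest.
               leaf_ids = np.unique(tree.apply(X_train))
               visited = tree.apply(X_query)
               codes.append((visited[:, None] == leaf_ids[None, :]).astype(np.uint8))
           return np.hstack(codes)

       X = np.random.rand(200, 16)
       y = np.random.randint(0, 10, size=200)
       print(forest_hash(X, y, X[:3]))  # one row of bits per query point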

@article{fds340082,
   Author = {Qiu, Q and Cheng, X and Calderbank, R and Sapiro,
             G},
   Title = {DCFNet: Deep Neural Network with Decomposed Convolutional
             Filters},
   Journal = {35th International Conference on Machine Learning, ICML
             2018},
   Volume = {9},
   Pages = {6687-6696},
   Year = {2018},
   Month = {January},
   Abstract = {Filters in a Convolutional Neural Network (CNN) contain
             model parameters learned from enormous amounts of data. In
             this paper, we propose decomposing convolutional filters in
             CNNs as a truncated expansion over pre-fixed bases, namely
             the Decomposed Convolutional Filters network (DCFNet), where
             the expansion coefficients are still learned from data. Such a
             structure not only reduces the number of trainable
             parameters and computation, but also imposes filter
             regularity by bases truncation. Through extensive
             experiments, we consistently observe that DCFNet maintains
             accuracy for image classification tasks with a significant
             reduction of model parameters, particularly with
             Fourier-Bessel (FB) bases, and even with random bases.
             Theoretically, we analyze the representation stability of
             DCFNet with respect to input variations, and prove
             representation stability under generic assumptions on the
             expansion coefficients. The analysis is consistent with the
             empirical observations.},
   Key = {fds340082}
}
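
   Note: The decomposition described above (fixed bases, learned expansion
   coefficients) is easy to express in code. This minimal PyTorch sketch
   uses random orthonormal bases as a stand-in for the Fourier-Bessel
   bases; it is a schematic layer, not the authors' implementation.

       import torch
       import torch.nn as nn
       import torch.nn.functional as F

       class DecomposedConv2d(nn.Module):
           def __init__(self, in_ch, out_ch, kernel_size=5, num_bases=3):
               super().__init__()
               k2 = kernel_size * kernel_size
               # Fixed (non-trainable) orthonormal bases for the filters.
               basis = torch.linalg.qr(torch.randn(k2, num_bases)).Q
               self.register_buffer(
                   "basis", basis.T.reshape(num_bases, kernel_size, kernel_size))
               # Trainable expansion coefficients, one per (out, in, basis).
               self.coeff = nn.Parameter(0.1 * torch.randn(out_ch, in_ch, num_bases))

           def forward(self, x):
               # Reconstruct filters as a truncated expansion over the bases.
               weight = torch.einsum("oik,khw->oihw", self.coeff, self.basis)
               return F.conv2d(x, weight, padding=2)  # padding matches 5x5 kernel

       layer = DecomposedConv2d(3, 8)
       print(layer(torch.randn(1, 3, 32, 32)).shape)  # -> (1, 8, 32, 32)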

@article{fds335967,
   Author = {Chiew, KS and Hashemi, J and Gans, LK and Lerebours, L and Clement, NJ and Vu, M-AT and Sapiro, G and Heller, NE and Adcock,
             RA},
   Title = {Motivational valence alters memory formation without
             altering exploration of a real-life spatial
             environment.},
   Journal = {PLoS One},
   Volume = {13},
   Number = {3},
   Pages = {e0193506},
   Year = {2018},
   url = {http://dx.doi.org/10.1371/journal.pone.0193506},
   Abstract = {Volitional exploration and learning are key to adaptive
             behavior, yet their characterization remains a complex
             problem for cognitive science. Exploration has been posited
             as a mechanism by which motivation promotes memory, but this
             relationship is not well-understood, in part because novel
             stimuli that motivate exploration also reliably elicit
             changes in neuromodulatory brain systems that directly alter
             memory formation, via effects on neural plasticity. To
             deconfound interrelationships between motivation,
             exploration, and memory formation we manipulated
             motivational state prior to entering a spatial context,
             measured exploratory responses to the context and novel
             stimuli within it, and then examined motivation and
             exploration as predictors of memory outcomes. To elicit
             spontaneous exploration, we used the physical space of an
             art exhibit with affectively rich content; we expected
             motivated exploration and memory to reflect multiple
             factors, including not only motivational valence, but also
             individual differences. Motivation was manipulated via an
             introductory statement framing exhibit themes in terms of
             Promotion- or Prevention-oriented goals. Participants
             explored the exhibit while being tracked by video. They
             returned 24 hours later for recall and spatial memory tests,
             followed by measures of motivation, personality, and
             relevant attitude variables. Promotion and Prevention
             condition participants did not differ in terms of
             group-level exploration time or memory metrics, suggesting
             similar motivation to explore under both framing contexts.
             However, exploratory behavior and memory outcomes were
             significantly more closely related under Promotion than
             Prevention, indicating that Prevention framing disrupted
             expected depth-of-encoding effects. Additionally, while
             trait measures predicted exploration similarly across
             framing conditions, traits interacted with motivational
             framing context and facial affect to predict memory
             outcomes. This novel characterization of motivated learning
             implies that dissociable behavioral and biological
             mechanisms, here varying as a function of valence,
             contribute to memory outcomes in complex, real-life
             environments.},
   Doi = {10.1371/journal.pone.0193506},
   Key = {fds335967}
}

@article{fds351210,
   Author = {Egger, HL and Dawson, G and Hashemi, J and Carpenter, KLH and Espinosa,
             S and Campbell, K and Brotkin, S and Schaich-Borg, J and Qiu, Q and Tepper,
             M and Baker, JP and Bloomfield, RA and Sapiro, G},
   Title = {Automatic emotion and attention analysis of young children
             at home: a ResearchKit autism feasibility
             study.},
   Journal = {NPJ Digit Med},
   Volume = {1},
   Pages = {20},
   Year = {2018},
   url = {http://dx.doi.org/10.1038/s41746-018-0024-6},
   Abstract = {Current tools for objectively measuring young children's
             observed behaviors are expensive, time-consuming, and
             require extensive training and professional administration.
             The lack of scalable, reliable, and validated tools impacts
             access to evidence-based knowledge and limits our capacity
             to collect population-level data in non-clinical settings.
             To address this gap, we developed mobile technology to
             collect videos of young children while they watched movies
             designed to elicit autism-related behaviors and then used
             automatic behavioral coding of these videos to quantify
             children's emotions and behaviors. We present results from
             our iPhone study Autism & Beyond, built on ResearchKit's
             open-source platform. The entire study, from an e-Consent
             process to stimuli presentation and data collection, was
             conducted within an iPhone-based app available in the Apple
             App Store. Over 1 year, 1756 families with children aged
             12-72 months participated in the study, completing 5618
             caregiver-reported surveys and uploading 4441 videos
             recorded in the child's natural settings. Usable data were
             collected on 87.6% of the uploaded videos. Automatic coding
             identified significant differences in emotion and attention
             by age, sex, and autism risk status. This study demonstrates
             the acceptability of an app-based tool to caregivers, their
             willingness to upload videos of their children, the
             feasibility of caregiver-collected data in the home, and the
             application of automatic behavioral encoding to quantify
             emotions and attention variables that are clinically
             meaningful and may be refined to screen children for autism
             and developmental disorders outside of clinical settings.
             This technology has the potential to transform how we screen
             and monitor children's development.},
   Doi = {10.1038/s41746-018-0024-6},
   Key = {fds351210}
}

@article{fds335969,
   Author = {Lezama, J and Qiu, Q and Sapiro, G},
   Title = {Not afraid of the dark: NIR-VIS face recognition via
             cross-spectral hallucination and low-rank
             embedding},
   Journal = {Proceedings - 30th IEEE Conference on Computer Vision and
             Pattern Recognition, CVPR 2017},
   Volume = {2017-January},
   Pages = {6807-6816},
   Publisher = {IEEE},
   Year = {2017},
   Month = {November},
   url = {http://dx.doi.org/10.1109/CVPR.2017.720},
   Abstract = {Surveillance cameras today often capture NIR (near infrared)
             images in low-light environments. However, most face
             datasets accessible for training and verification are only
             collected in the VIS (visible light) spectrum. It remains a
             challenging problem to match NIR to VIS face images due to
             the different light spectrum. Recently, breakthroughs have
             been made for VIS face recognition by applying deep learning
             on a huge amount of labeled VIS face samples. The same deep
             learning approach cannot be simply applied to NIR face
             recognition for two main reasons: First, far fewer NIR
             face images are available for training than in the VIS
             spectrum. Second, face galleries to be matched are mostly
             available only in the VIS spectrum. In this paper, we
             propose an approach to extend the deep learning breakthrough
             for VIS face recognition to the NIR spectrum, without
             retraining the underlying deep models that see only VIS
             faces. Our approach consists of two core components,
             cross-spectral hallucination and low-rank embedding, to
             optimize respectively input and output of a VIS deep model
             for cross-spectral face recognition. Cross-spectral
             hallucination produces VIS faces from NIR images through a
             deep learning approach. Low-rank embedding restores a
             low-rank structure for face deep features across both the
             NIR and VIS spectra. We observe that it is often equally
             effective to perform hallucination to input NIR images or
             low-rank embedding to output deep features for a VIS deep
             model for cross-spectral recognition. When hallucination and
             low-rank embedding are deployed together, we observe
             significant further improvement; we obtain state-of-the-art
             accuracy on the CASIA NIR-VIS v2.0 benchmark, without the
             need at all to re-train the recognition system.},
   Doi = {10.1109/CVPR.2017.720},
   Key = {fds335969}
}

@article{fds335970,
   Author = {Ye, Q and Zhang, T and Ke, W and Qiu, Q and Chen, J and Sapiro, G and Zhang,
             B},
   Title = {Self-learning scene-specific pedestrian detectors using a
             progressive latent model},
   Journal = {Proceedings - 30th IEEE Conference on Computer Vision and
             Pattern Recognition, CVPR 2017},
   Volume = {2017-January},
   Pages = {2057-2066},
   Publisher = {IEEE},
   Year = {2017},
   Month = {November},
   url = {http://dx.doi.org/10.1109/CVPR.2017.222},
   Abstract = {In this paper, a self-learning approach is proposed towards
             solving the scene-specific pedestrian detection problem
             without any human annotation involved. The self-learning approach
             is deployed as progressive steps of object discovery, object
             enforcement, and label propagation. In the learning
             procedure, object locations in each frame are treated as
             latent variables that are solved with a progressive latent
             model (PLM). Compared with conventional latent models, the
             proposed PLM incorporates a spatial regularization term to
             reduce ambiguities in object proposals and to enforce object
             localization, and also a graph-based label propagation to
             discover harder instances in adjacent frames. With
             difference-of-convex (DC) objective functions, the PLM can be
             efficiently optimized with concave-convex programming, thus
             guaranteeing the stability of self-learning. Extensive
             experiments demonstrate that even without annotation the
             proposed self-learning approach outperforms weakly
             supervised learning approaches, while achieving comparable
             performance with transfer learning and fully supervised
             approaches.},
   Doi = {10.1109/CVPR.2017.222},
   Key = {fds335970}
}

@article{fds335971,
   Author = {Su, S and Delbracio, M and Wang, J and Sapiro, G and Heidrich, W and Wang,
             O},
   Title = {Deep video deblurring for hand-held cameras},
   Journal = {Proceedings - 30th IEEE Conference on Computer Vision and
             Pattern Recognition, CVPR 2017},
   Volume = {2017-January},
   Pages = {237-246},
   Publisher = {IEEE},
   Year = {2017},
   Month = {November},
   ISBN = {9781538604571},
   url = {http://dx.doi.org/10.1109/CVPR.2017.33},
   Abstract = {Motion blur from camera shake is a major problem in videos
             captured by hand-held devices. Unlike single-image
             deblurring, video-based approaches can take advantage of the
             abundant information that exists across neighboring frames.
             As a result the best performing methods rely on the
             alignment of nearby frames. However, aligning images is a
             computationally expensive and fragile procedure, and methods
             that aggregate information must therefore be able to
             identify which regions have been accurately aligned and
             which have not, a task that requires high level scene
             understanding. In this work, we introduce a deep learning
             solution to video deblurring, where a CNN is trained
             end-to-end to learn how to accumulate information across
             frames. To train this network, we collected a dataset of
             real videos recorded with a high frame rate camera, which we
             use to generate synthetic motion blur for supervision. We
             show that the features learned from this dataset extend to
             deblurring motion blur that arises due to camera shake in a
             wide range of videos, and compare the quality of results to
             a number of other baselines.},
   Doi = {10.1109/CVPR.2017.33},
   Key = {fds335971}
}

@article{fds335972,
   Author = {Tepper, M and Sapiro, G},
   Title = {Nonnegative matrix underapproximation for robust multiple
             model fitting},
   Journal = {Proceedings - 30th IEEE Conference on Computer Vision and
             Pattern Recognition, CVPR 2017},
   Volume = {2017-January},
   Pages = {655-663},
   Publisher = {IEEE},
   Year = {2017},
   Month = {November},
   ISBN = {9781538604571},
   url = {http://dx.doi.org/10.1109/CVPR.2017.77},
   Abstract = {In this work, we introduce a highly efficient algorithm to
             address the nonnegative matrix underapproximation (NMU)
             problem, i.e., nonnegative matrix factorization (NMF) with
             an additional underapproximation constraint. NMU results are
             interesting as, compared to traditional NMF, they present
             additional sparsity and part-based behavior, explaining
             unique data features. To show these features in practice, we
             first present an application to the analysis of climate
             data. We then present an NMU-based algorithm to robustly fit
             multiple parametric models to a dataset. The proposed
             approach delivers state-of-the-art results for the
             estimation of multiple fundamental matrices and
             homographies, outperforming other alternatives in the
             literature and exemplifying the use of efficient NMU
             computations.},
   Doi = {10.1109/CVPR.2017.77},
   Key = {fds335972}
}
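
   Note: For readers new to NMU, the rank-one subproblem (find nonnegative
   x, y with x y^T <= M) can be approached with a simple Lagrangian update
   scheme in the style of Gillis and Glineur. The Python sketch below
   illustrates only that baseline; the paper above contributes a more
   efficient algorithm, which this sketch does not reproduce.

       import numpy as np

       def nmu_rank_one(M, n_iter=200, seed=0):
           rng = np.random.default_rng(seed)
           x, y = rng.random(M.shape[0]), rng.random(M.shape[1])
           Lam = np.zeros_like(M)  # multipliers for the constraint x y^T <= M
           for it in range(n_iter):
               A = M - Lam
               x = np.maximum(0.0, A @ y) / max(y @ y, 1e-12)
               y = np.maximum(0.0, A.T @ x) / max(x @ x, 1e-12)
               # Gradually penalize violations of the underapproximation.
               Lam = np.maximum(0.0, Lam + (np.outer(x, y) - M) / (it + 1))
           return x, y

       M = np.random.rand(30, 20)
       x, y = nmu_rank_one(M)
       print("max constraint violation:", np.maximum(np.outer(x, y) - M, 0).max())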

@article{fds329136,
   Author = {Pisharady, PK and Sotiropoulos, SN and Sapiro, G and Lenglet,
             C},
   Title = {A Sparse Bayesian Learning Algorithm for White Matter
             Parameter Estimation from Compressed Multi-shell Diffusion
             MRI.},
   Journal = {Medical image computing and computer-assisted intervention :
             MICCAI ... International Conference on Medical Image
             Computing and Computer-Assisted Intervention},
   Volume = {10433},
   Pages = {602-610},
   Year = {2017},
   Month = {September},
   ISBN = {9783319661810},
   url = {http://dx.doi.org/10.1007/978-3-319-66182-7_69},
   Abstract = {We propose a sparse Bayesian learning algorithm for improved
             estimation of white matter fiber parameters from compressed
             (under-sampled q-space) multi-shell diffusion MRI data. The
             multi-shell data is represented in a dictionary form using a
             non-monoexponential decay model of diffusion, based on
             continuous gamma distribution of diffusivities. The fiber
             volume fractions with predefined orientations, which are the
             unknown parameters, form the dictionary weights. These
             unknown parameters are estimated with a linear un-mixing
             framework, using a sparse Bayesian learning algorithm. A
             localized learning of hyperparameters at each voxel and for
             each possible fiber orientation improves the parameter
             estimation. Our experiments using synthetic data from the
             ISBI 2012 HARDI reconstruction challenge and in-vivo data
             from the Human Connectome Project demonstrate the
             improvements.},
   Doi = {10.1007/978-3-319-66182-7_69},
   Key = {fds329136}
}

@article{fds329481,
   Author = {Sokolić, J and Giryes, R and Sapiro, G and Rodrigues,
             MRD},
   Title = {Generalization error of deep neural networks: Role of
             classification margin and data structure},
   Journal = {2017 12th International Conference on Sampling Theory and
             Applications, SampTA 2017},
   Pages = {147-151},
   Publisher = {IEEE},
   Year = {2017},
   Month = {September},
   ISBN = {9781538615652},
   url = {http://dx.doi.org/10.1109/SAMPTA.2017.8024476},
   Abstract = {Understanding the generalization properties of deep learning
             models is critical for their successful usage in many
             applications, especially in the regimes where the number of
             training samples is limited. We study the generalization
             properties of deep neural networks (DNNs) via the Jacobian
             matrix of the network. Our analysis applies to arbitrary
             network structures, types of non-linearities, and pooling
             operations. We show that bounding the spectral norm of the
             Jacobian matrix in the network reduces the generalization
             error. In addition, we tie this error to the invariance in
             the data and the network. Experiments on the MNIST and
             ImageNet datasets support these findings. This short paper
             summarizes our generalization error theorems for DNNs and
             for general invariant classifiers [1], [2].},
   Doi = {10.1109/SAMPTA.2017.8024476},
   Key = {fds329481}
}

@article{fds328865,
   Author = {Sokolić, J and Giryes, R and Sapiro, G and Rodrigues,
             MRD},
   Title = {Robust Large Margin Deep Neural Networks},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {65},
   Number = {16},
   Pages = {4265-4280},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2017},
   Month = {August},
   url = {http://dx.doi.org/10.1109/TSP.2017.2708039},
   Abstract = {The generalization error of deep neural networks via their
             classification margin is studied in this paper. Our approach
             is based on the Jacobian matrix of a deep neural network and
             can be applied to networks with arbitrary nonlinearities and
             pooling layers, and to networks with different architectures
             such as feed forward networks and residual networks. Our
             analysis leads to the conclusion that a bounded spectral
             norm of the network's Jacobian matrix in the neighbourhood
             of the training samples is crucial for a deep neural network
             of arbitrary depth and width to generalize well. This is a
             significant improvement over the current bounds in the
             literature, which imply that the generalization error grows
             with either the width or the depth of the network. Moreover,
             it shows that the recently proposed batch normalization and
             weight normalization reparametrizations enjoy good
             generalization properties, and leads to a novel network
             regularizer based on the network's Jacobian matrix. The
             analysis is supported with experimental results on the
             MNIST, CIFAR-10, LaRED, and ImageNet datasets.},
   Doi = {10.1109/TSP.2017.2708039},
   Key = {fds328865}
}
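
   Note: The central quantity in the two papers above, the spectral norm
   of the network's input-output Jacobian at a data point, is simple to
   compute for a small network. A minimal numpy illustration for a
   two-layer ReLU network with random weights:

       import numpy as np

       rng = np.random.default_rng(0)
       W1 = rng.standard_normal((64, 32))
       W2 = rng.standard_normal((10, 64))

       def jacobian_spectral_norm(x):
           h = W1 @ x
           D = np.diag((h > 0).astype(float))  # ReLU derivative at x
           J = W2 @ D @ W1                     # Jacobian of f(x) = W2 relu(W1 x)
           return np.linalg.svd(J, compute_uv=False)[0]

       x = rng.standard_normal(32)
       print("||J(x)||_2 =", jacobian_spectral_norm(x))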

@article{fds335964,
   Author = {Qiu, Q and Hashemi, J and Sapiro, G},
   Title = {Intelligent synthesis driven model calibration: framework
             and face recognition application},
   Journal = {Proceedings - 2017 IEEE International Conference on Computer
             Vision Workshops, ICCVW 2017},
   Volume = {2018-January},
   Pages = {2564-2572},
   Publisher = {IEEE},
   Year = {2017},
   Month = {July},
   ISBN = {9781538610343},
   url = {http://dx.doi.org/10.1109/ICCVW.2017.301},
   Abstract = {Deep Neural Networks (DNNs) that achieve state-of-the-art
             results are still prone to suffer performance degradation
             when deployed in many real-world scenarios due to shifts
             between the training and deployment domains. Limited data
             from a given setting can be enriched through synthesis, then
             used to calibrate a pre-trained DNN to improve the
             performance in the setting. Most enrichment approaches try
             to generate as much data as possible; however, this blind
             approach is computationally expensive and can lead to
             generating redundant data. In contrast, we develop
             synthesis methods, here exemplified for faces, and propose
             information-driven approaches to exploit and optimally
             select face synthesis types at both training and testing. We
             show that our approaches, without re-designing a new DNN,
             lead to more efficient training and improved performance. We
             demonstrate the effectiveness of our approaches by
             calibrating a state-of-the-art DNN to two challenging face
             recognition datasets.},
   Doi = {10.1109/ICCVW.2017.301},
   Key = {fds335964}
}

@article{fds335965,
   Author = {Sokolić, J and Qiu, Q and Rodrigues, MRD and Sapiro,
             G},
   Title = {Learning to identify while failing to discriminate},
   Journal = {Proceedings - 2017 IEEE International Conference on Computer
             Vision Workshops, ICCVW 2017},
   Volume = {2018-January},
   Pages = {2537-2544},
   Publisher = {IEEE},
   Year = {2017},
   Month = {July},
   ISBN = {9781538610343},
   url = {http://dx.doi.org/10.1109/ICCVW.2017.298},
   Abstract = {Privacy and fairness are critical in computer vision
             applications, in particular when dealing with human
             identification. Achieving universally secure, private, and
             fair systems is practically impossible, as the exploitation
             of additional data can reveal private information in the
             original data. Faced with this challenge, we propose a new
             line of research, where privacy is learned and used in a
             closed environment. The goal is to ensure that a given
             entity, trusted to infer certain information with our data,
             is blocked from inferring protected information from it. We
             design a system that learns to succeed on the positive task
             while simultaneously failing at the negative one, and
             illustrate this with challenging cases where the positive
             task (face verification) is harder than the negative one
             (gender classification). The framework opens the door to
             privacy and fairness in very important closed scenarios,
             ranging from private data accumulation companies to
             law-enforcement and hospitals.},
   Doi = {10.1109/ICCVW.2017.298},
   Key = {fds335965}
}
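
   Note: One generic way to realize the "succeed on the positive task
   while failing at the negative one" objective is a shared encoder with
   two heads, where the negative head's gradient is reversed before it
   reaches the encoder. The PyTorch sketch below shows this standard
   gradient-reversal construction; the paper's exact training scheme may
   differ.

       import torch
       import torch.nn as nn
       import torch.nn.functional as F

       class GradReverse(torch.autograd.Function):
           @staticmethod
           def forward(ctx, x):
               return x.view_as(x)
           @staticmethod
           def backward(ctx, g):
               return -g  # flip the gradient flowing into the encoder

       encoder = nn.Sequential(nn.Linear(128, 64), nn.ReLU())
       pos_head = nn.Linear(64, 2)  # positive task (e.g., verification)
       neg_head = nn.Linear(64, 2)  # negative task (e.g., protected attribute)

       x = torch.randn(16, 128)
       y_pos = torch.randint(0, 2, (16,))
       y_neg = torch.randint(0, 2, (16,))

       z = encoder(x)
       loss = F.cross_entropy(pos_head(z), y_pos) \
            + F.cross_entropy(neg_head(GradReverse.apply(z)), y_neg)
       loss.backward()  # encoder gradients help the positive task while
                        # pushing the negative task toward chance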

@article{fds323853,
   Author = {Campbell, K and Carpenter, KLH and Espinosa, S and Hashemi, J and Qiu,
             Q and Tepper, M and Calderbank, R and Sapiro, G and Egger, HL and Baker,
             JP and Dawson, G},
   Title = {Use of a Digital Modified Checklist for Autism in Toddlers -
             Revised with Follow-up to Improve Quality of Screening for
             Autism.},
   Journal = {J Pediatr},
   Volume = {183},
   Pages = {133-139.e1},
   Year = {2017},
   Month = {April},
   url = {http://dx.doi.org/10.1016/j.jpeds.2017.01.021},
   Abstract = {OBJECTIVES: To assess changes in quality of care for
             children at risk for autism spectrum disorders (ASD) due to
             process improvement and implementation of a digital
             screening form. STUDY DESIGN: The process of screening for
             ASD was studied in an academic primary care pediatrics
             clinic before and after implementation of a digital version
             of the Modified Checklist for Autism in Toddlers - Revised
             with Follow-up with automated risk assessment. Quality
             metrics included accuracy of documentation of screening
             results and appropriate action for positive screens
             (secondary screening or referral). Participating physicians
             completed pre- and postintervention surveys to measure
             changes in attitudes toward feasibility and value of
             screening for ASD. Evidence of change was evaluated with
             statistical process control charts and χ2 tests. RESULTS:
             Accurate documentation in the electronic health record of
             screening results increased from 54% to 92% (38% increase,
             95% CI 14%-64%) and appropriate action for children
             screening positive increased from 25% to 85% (60% increase,
             95% CI 35%-85%). A total of 90% of participating physicians
             agreed that the transition to a digital screening form
             improved their clinical assessment of autism risk.
             CONCLUSIONS: Implementation of a tablet-based digital
             version of the Modified Checklist for Autism in Toddlers -
             Revised with Follow-up led to improved quality of care for
             children at risk for ASD and increased acceptability of
             screening for ASD. Continued efforts towards improving the
             process of screening for ASD could facilitate rapid, early
             diagnosis of ASD and advance the accuracy of studies of the
             impact of screening.},
   Doi = {10.1016/j.jpeds.2017.01.021},
   Key = {fds323853}
}

@article{fds326146,
   Author = {Simhal, AK and Aguerrebere, C and Collman, F and Vogelstein, JT and Micheva, KD and Weinberg, RJ and Smith, SJ and Sapiro,
             G},
   Title = {Probabilistic fluorescence-based synapse
             detection.},
   Journal = {PLoS computational biology},
   Volume = {13},
   Number = {4},
   Pages = {e1005493},
   Year = {2017},
   Month = {April},
   url = {http://dx.doi.org/10.1371/journal.pcbi.1005493},
   Abstract = {Deeper exploration of the brain's vast synaptic networks
             will require new tools for high-throughput structural and
             molecular profiling of the diverse populations of synapses
             that compose those networks. Fluorescence microscopy (FM)
             and electron microscopy (EM) offer complementary advantages
             and disadvantages for single-synapse analysis. FM combines
             exquisite molecular discrimination capacities with high
             speed and low cost, but rigorous discrimination between
             synaptic and non-synaptic fluorescence signals is
             challenging. In contrast, EM remains the gold standard for
             reliable identification of a synapse, but offers only
             limited molecular discrimination and is slow and costly. To
             develop and test single-synapse image analysis methods, we
             have used datasets from conjugate array tomography (cAT),
             which provides voxel-conjugate FM and EM (annotated) images
             of the same individual synapses. We report a novel
             unsupervised probabilistic method for detection of synapses
             from multiplex FM (muxFM) image data, and evaluate this
             method both by comparison to EM gold standard annotated data
             and by examining its capacity to reproduce known important
             features of cortical synapse distributions. The proposed
             probabilistic model-based synapse detector accepts
             molecular-morphological synapse models as user queries, and
             delivers a volumetric map of the probability that each voxel
             represents part of a synapse. Taking human annotation of cAT
             EM data as ground truth, we show that our algorithm detects
             synapses from muxFM data alone as successfully as human
             annotators seeing only the muxFM data, and accurately
             reproduces known architectural features of cortical synapse
             distributions. This approach opens the door to data-driven
             discovery of new synapse types and their density. We suggest
             that our probabilistic synapse detector will also be useful
             for analysis of standard confocal and super-resolution FM
             images, where EM cross-validation is not
             practical.},
   Doi = {10.1371/journal.pcbi.1005493},
   Key = {fds326146}
}

@article{fds324086,
   Author = {Chen, J and Chang, Z and Qiu, Q and Li, X and Sapiro, G and Bronstein, A and Pietikäinen, M},
   Title = {RealSense = real heart rate: Illumination invariant heart
             rate estimation from videos},
   Journal = {2016 6th International Conference on Image Processing
             Theory, Tools and Applications, IPTA 2016},
   Publisher = {IEEE},
   Year = {2017},
   Month = {January},
   ISBN = {9781467389105},
   url = {http://dx.doi.org/10.1109/IPTA.2016.7820970},
   Abstract = {Recent studies validated the feasibility of estimating heart
             rate from human faces in RGB video. However, test subjects
             are often recorded under controlled conditions, as
             illumination variations significantly affect the RGB-based
             heart rate estimation accuracy. Intel's newly announced
             low-cost RealSense 3D (RGBD) camera is becoming ubiquitous
             in laptops and mobile devices starting this year, opening
             the door to new and more robust computer vision
             applications. RealSense
             cameras produce RGB images with extra depth information
             inferred from a latent near-infrared (NIR) channel. In this
             paper, we experimentally demonstrate, for the first time,
             that heart rate can be reliably estimated from RealSense
             near-infrared images. This enables illumination invariant
             heart rate estimation, extending the heart rate from video
             feasibility to low-light applications, such as night
             driving. With the (coming) ubiquitous presence of RealSense
             devices, the proposed method not only utilizes its
             near-infrared channel, designed originally to be hidden from
             consumers; but also exploits the associated depth
             information for improved robustness to head
             pose.},
   Doi = {10.1109/IPTA.2016.7820970},
   Key = {fds324086}
}
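
   Note: The core signal-processing step behind video heart-rate
   estimation, whether from RGB or NIR frames, is to track the mean
   intensity of a facial region over time and locate the dominant
   spectral peak in the physiologically plausible band. A minimal numpy
   sketch on a synthetic 72 bpm trace (the generic principle, not the
   paper's full pipeline):

       import numpy as np

       fps = 30.0
       t = np.arange(0, 30, 1 / fps)  # 30 s of frames
       trace = np.sin(2 * np.pi * 1.2 * t) + 0.5 * np.random.randn(t.size)

       trace = trace - trace.mean()
       freqs = np.fft.rfftfreq(t.size, d=1 / fps)
       spec = np.abs(np.fft.rfft(trace))

       band = (freqs >= 0.7) & (freqs <= 3.0)  # 42-180 bpm
       f_hr = freqs[band][np.argmax(spec[band])]
       print("estimated heart rate: %.1f bpm" % (60 * f_hr))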

@article{fds326840,
   Author = {Gunalan, K and Chaturvedi, A and Howell, B and Duchin, Y and Lempka, SF and Patriat, R and Sapiro, G and Harel, N and McIntyre,
             CC},
   Title = {Creating and parameterizing patient-specific deep brain
             stimulation pathway-activation models using the hyperdirect
             pathway as an example.},
   Journal = {PLoS One},
   Volume = {12},
   Number = {4},
   Pages = {e0176132},
   Year = {2017},
   Month = {January},
   url = {http://dx.doi.org/10.1371/journal.pone.0176132},
   Abstract = {BACKGROUND: Deep brain stimulation (DBS) is an established
             clinical therapy and computational models have played an
             important role in advancing the technology. Patient-specific
             DBS models are now common tools in both academic and
             industrial research, as well as clinical software systems.
             However, the exact methodology for creating patient-specific
             DBS models can vary substantially and important technical
             details are often missing from published reports. OBJECTIVE:
             Provide a detailed description of the assembly workflow and
             parameterization of a patient-specific DBS
             pathway-activation model (PAM) and predict the response of
             the hyperdirect pathway to clinical stimulation. METHODS:
             Integration of multiple software tools (e.g. COMSOL, MATLAB,
             FSL, NEURON, Python) enables the creation and visualization
             of a DBS PAM. An example DBS PAM was developed using 7T
             magnetic resonance imaging data from a single unilaterally
             implanted patient with Parkinson's disease (PD). This
             detailed description implements our best computational
             practices and most elaborate parameterization steps, as
             defined from over a decade of technical evolution. RESULTS:
             Pathway recruitment curves and strength-duration
             relationships highlight the non-linear response of axons to
             changes in the DBS parameter settings. CONCLUSION:
             Parameterization of patient-specific DBS models can be
             highly detailed and constrained, thereby providing
             confidence in the simulation predictions, but at the expense
             of time-demanding technical implementation steps. DBS PAMs
             represent new tools for investigating possible correlations
             between brain pathway activation patterns and clinical
             symptom modulation.},
   Doi = {10.1371/journal.pone.0176132},
   Key = {fds326840}
}

@article{fds349514,
   Author = {Sokolić, J and Giryes, R and Sapiro, G and Rodrigues,
             MRD},
   Title = {Generalization error of invariant classifiers},
   Journal = {Proceedings of the 20th International Conference on
             Artificial Intelligence and Statistics, AISTATS
             2017},
   Year = {2017},
   Month = {January},
   Abstract = {This paper studies the generalization error of invariant
             classifiers. In particular, we consider the common scenario
             where the classification task is invariant to certain
             transformations of the input, and the classifier is
             constructed (or learned) to be invariant to these
             transformations. Our approach relies on factoring the input
             space into a product of a base space and a set of
             transformations. We show that whereas the generalization
             error of a non-invariant classifier is proportional to the
             complexity of the input space, the generalization error of
             an invariant classifier is proportional to the complexity of
             the base space. We also derive a set of sufficient
             conditions on the geometry of the base space and the set of
             transformations that ensure that the complexity of the base
             space is much smaller than the complexity of the input
             space. Our analysis applies to general classifiers such as
             convolutional neural networks. We demonstrate the
             implications of the developed theory for such classifiers
             with experiments on the MNIST and CIFAR-10
             datasets.},
   Key = {fds349514}
}

@article{fds322212,
   Author = {Lezama, J and Mukherjee, D and McNabb, RP and Sapiro, G and Kuo, AN and Farsiu, S},
   Title = {Segmentation guided registration of wide field-of-view
             retinal optical coherence tomography volumes.},
   Journal = {Biomed Opt Express},
   Volume = {7},
   Number = {12},
   Pages = {4827-4846},
   Year = {2016},
   Month = {December},
   url = {http://dx.doi.org/10.1364/BOE.7.004827},
   Abstract = {Patient motion artifacts are often visible in densely
             sampled or large wide field-of-view (FOV) retinal optical
             coherence tomography (OCT) volumes. A popular strategy for
             reducing motion artifacts is to capture two orthogonally
             oriented volumetric scans. However, due to larger volume
             sizes, longer acquisition times, and corresponding larger
             motion artifacts, the registration of wide FOV scans remains
             a challenging problem. In particular, gaps in data
             acquisition due to eye motion, such as saccades, can be
             significant and their modeling becomes critical for
             successful registration. In this article, we develop a
             complete computational pipeline for the automatic motion
             correction and accurate registration of wide FOV
             orthogonally scanned OCT images of the human retina. The
             proposed framework utilizes the retinal boundary
             segmentation as a guide for registration and requires only a
             minimal transformation of the acquired data to produce a
             successful registration. It includes saccade detection and
             correction, a custom version of the optical flow algorithm
             for dense lateral registration and a linear optimization
             approach for axial registration. Utilizing a wide FOV swept
             source OCT system, we acquired retinal volumes of 12
             subjects and we provide qualitative and quantitative
             experimental results to validate the state-of-the-art
             effectiveness of the proposed technique. The source code
             corresponding to the proposed algorithm is available
             online.},
   Doi = {10.1364/BOE.7.004827},
   Key = {fds322212}
}

@article{fds322672,
   Author = {Aguerrebere, C and Delbracio, M and Bartesaghi, A and Sapiro,
             G},
   Title = {Fundamental limits in multi-image alignment},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {64},
   Number = {21},
   Pages = {5707-5722},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2016},
   Month = {November},
   url = {http://dx.doi.org/10.1109/TSP.2016.2600517},
   Abstract = {The performance of multi-image alignment, bringing different
             images into one coordinate system, is critical in many
             applications with varied signal-to-noise ratio (SNR)
             conditions. A great amount of effort is being invested into
             developing methods to solve this problem. Several important
             questions thus arise, including: What are the fundamental
             limits of multi-image alignment performance? Does having
             access to more images improve the alignment? Theoretical
             bounds provide a fundamental benchmark to compare methods
             and can help establish whether improvements can be made. In
             this work, we tackle the problem of finding the performance
             limits in image registration when multiple shifted and noisy
             observations are available. We derive and analyze the
             Cramér-Rao and Ziv-Zakai lower bounds under different
             statistical models for the underlying image. We show the
             existence of different behavior zones depending on the
             difficulty level of the problem, given by the SNR conditions
             of the input images. The analysis we present here brings
             further insight into the fundamental limitations of the
             multi-image alignment problem.},
   Doi = {10.1109/TSP.2016.2600517},
   Key = {fds322672}
}
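
   Note: As background for the bounds discussed above, the classical
   single-parameter instance is instructive. For one observation
   $y(x) = I(x - s) + n(x)$ with i.i.d. Gaussian noise
   $n \sim \mathcal{N}(0, \sigma^2)$, the Cramér-Rao bound on any
   unbiased estimator $\hat{s}$ of the shift is

       $$\mathrm{var}(\hat{s}) \;\ge\; \frac{\sigma^2}{\sum_x |I'(x)|^2},$$

   so the bound is governed by the noise level and the energy of the
   image gradient. The multi-image setting analyzed in the paper, where
   each observation carries its own unknown shift, is substantially more
   involved; the formula above is only the textbook starting point.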

@article{fds322673,
   Author = {Elhamifar, E and Sapiro, G and Sastry, SS},
   Title = {Dissimilarity-Based Sparse Subset Selection.},
   Journal = {IEEE transactions on pattern analysis and machine
             intelligence},
   Volume = {38},
   Number = {11},
   Pages = {2182-2197},
   Year = {2016},
   Month = {November},
   url = {http://dx.doi.org/10.1109/tpami.2015.2511748},
   Abstract = {Finding an informative subset of a large collection of data
             points or models is at the center of many problems in
             computer vision, recommender systems, bio/health informatics
             as well as image and natural language processing. Given
             pairwise dissimilarities between the elements of a 'source
             set' and a 'target set,' we consider the problem of finding
             a subset of the source set, called representatives or
             exemplars, that can efficiently describe the target set. We
             formulate the problem as a row-sparsity regularized trace
             minimization problem. Since the proposed formulation is, in
             general, NP-hard, we consider a convex relaxation. The
             solution of our optimization finds representatives and the
             assignment of each element of the target set to each
             representative, hence, obtaining a clustering. We analyze
             the solution of our proposed optimization as a function of
             the regularization parameter. We show that when the two sets
             jointly partition into multiple groups, our algorithm finds
             representatives from all groups and reveals clustering of
             the sets. In addition, we show that the proposed framework
             can effectively deal with outliers. Our algorithm works with
             arbitrary dissimilarities, which can be asymmetric or
             violate the triangle inequality. To efficiently implement
             our algorithm, we consider an Alternating Direction Method
             of Multipliers (ADMM) framework, which results in quadratic
             complexity in the problem size. We show that the ADMM
             implementation allows to parallelize the algorithm, hence
             further reducing the computational time. Finally, by
             experiments on real-world datasets, we show that our
             proposed algorithm improves the state of the art on the two
             problems of scene categorization using representative images
             and time-series modeling and segmentation using
             representative models.},
   Doi = {10.1109/tpami.2015.2511748},
   Key = {fds322673}
}
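
   Note: In symbols, the row-sparsity regularized trace minimization
   described above can be written (up to notation; the paper's exact
   formulation may differ in details) as

       $$\min_{Z \ge 0} \; \lambda \sum_{i} \|z_i\|_q + \mathrm{tr}(D^\top Z)
         \quad \text{s.t.} \quad \mathbf{1}^\top Z = \mathbf{1}^\top,$$

   where $D$ collects the pairwise dissimilarities $d_{ij}$ between
   source element $i$ and target element $j$, $z_i$ is the $i$-th row of
   $Z$, and $z_{ij}$ encodes the assignment of target element $j$ to
   source element $i$. Each column summing to one ensures every target
   element is represented, while the row-sparsity term drives most rows,
   and hence most candidate representatives, to zero.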

@article{fds322674,
   Author = {Fiori, M and Muse, P and Tepper, M and Sapiro, G},
   Title = {Tell me where you are and i tell you where you are going:
             Estimation of dynamic mobility graphs},
   Journal = {Proceedings of the IEEE Sensor Array and Multichannel Signal
             Processing Workshop},
   Volume = {2016-September},
   Publisher = {IEEE},
   Year = {2016},
   Month = {September},
   ISBN = {9781509021031},
   url = {http://dx.doi.org/10.1109/SAM.2016.7569685},
   Abstract = {The interest in problems related to graph inference has been
             increasing significantly during the last decade. However,
             the vast majority of the problems addressed are either
             static, or systems where changes in one node are immediately
             reflected in other nodes. In this paper we address the
             problem of mobility graph estimation, when the available
             dataset has an asynchronous and time-variant nature. We
             present a formulation for this problem consisting of the
             optimization of a cost function with a fitting term that
             explains the observations through the dynamics of the
             system, and a sparsity-promoting penalty term that selects
             the paths actually used. The formulation is tested on two
             publicly available real datasets on US aviation and NY taxi
             traffic, showing the importance of the problem and the
             applicability of the proposed framework.},
   Doi = {10.1109/SAM.2016.7569685},
   Key = {fds322674}
}

@article{fds322675,
   Author = {Giryes, R and Sapiro, G and Bronstein, AM},
   Title = {Deep Neural Networks with Random Gaussian Weights: A
             Universal Classification Strategy?},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {64},
   Number = {13},
   Pages = {3444-3457},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2016},
   Month = {July},
   url = {http://dx.doi.org/10.1109/TSP.2016.2546221},
   Abstract = {Three important properties of a classification machinery are
             i) the system preserves the core information of the input
             data; ii) the training examples convey information about
             unseen data; and iii) the system is able to treat points
             from different classes differently. In this paper, we
             show that these fundamental properties are satisfied by the
             architecture of deep neural networks. We formally prove that
             these networks with random Gaussian weights perform a
             distance-preserving embedding of the data, with a special
             treatment for in-class and out-of-class data. Similar points
             at the input of the network are likely to have a similar
             output. The theoretical analysis of deep networks here
             presented exploits tools used in the compressed sensing and
             dictionary learning literature, thereby making a formal
             connection between these important topics. The derived
             results allow drawing conclusions on the metric learning
             properties of the network and their relation to its
             structure, as well as providing bounds on the required size
             of the training set such that the training examples would
             represent faithfully the unseen data. The results are
             validated with state-of-the-art trained networks.},
   Doi = {10.1109/TSP.2016.2546221},
   Key = {fds322675}
}
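
   Note: The distance-preserving embedding result above is easy to probe
   numerically. In this small numpy experiment, a wide random Gaussian
   ReLU layer (He-scaled) approximately preserves input norms; pairwise
   distances are deformed in an angle-dependent way, which is exactly
   the in-class/out-of-class behavior the paper characterizes.

       import numpy as np

       rng = np.random.default_rng(1)
       d_in, d_out = 50, 4000
       X = rng.standard_normal((20, d_in))
       W = rng.standard_normal((d_out, d_in)) * np.sqrt(2.0 / d_out)

       H = np.maximum(0.0, X @ W.T)  # one random Gaussian ReLU layer

       i = 3
       print("input norm :", np.linalg.norm(X[i]))
       print("output norm:", np.linalg.norm(H[i]))  # close to the input norm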

@article{fds322676,
   Author = {Tepper, M and Sapiro, G},
   Title = {A short-graph fourier transform via personalized pagerank
             vectors},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Volume = {2016-May},
   Pages = {4806-4810},
   Publisher = {IEEE},
   Year = {2016},
   Month = {May},
   ISBN = {9781479999880},
   url = {http://dx.doi.org/10.1109/ICASSP.2016.7472590},
   Abstract = {The short-time Fourier transform (STFT) is widely used to
             analyze the spectra of temporal signals that vary through
             time. Signals defined over graphs, due to their intrinsic
             complexity, exhibit large variations in their patterns. In
             this work we propose a new formulation for an STFT for
             signals defined over graphs. This formulation draws on
             recent ideas from spectral graph theory, using personalized
             PageRank vectors as its fundamental building block.
             Furthermore, this work establishes and explores the
             connection between local spectral graph theory and localized
             spectral analysis of graph signals. We accompany the
             presentation with synthetic and real-world examples, showing
             the suitability of the proposed approach.},
   Doi = {10.1109/ICASSP.2016.7472590},
   Key = {fds322676}
}
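
   Note: The fundamental building block named above, a personalized
   PageRank vector, can be computed by plain power iteration. A minimal
   numpy sketch on a 6-vertex path graph, with the restart distribution
   concentrated on a single seed vertex:

       import numpy as np

       def personalized_pagerank(A, seed, alpha=0.15, n_iter=100):
           """A: adjacency matrix (no isolated vertices); seed: focus vertex."""
           P = A / A.sum(axis=1, keepdims=True)  # row-stochastic transitions
           s = np.zeros(A.shape[0])
           s[seed] = 1.0                         # restart at the seed vertex
           pr = s.copy()
           for _ in range(n_iter):
               pr = alpha * s + (1 - alpha) * pr @ P
           return pr

       A = np.diag(np.ones(5), 1) + np.diag(np.ones(5), -1)  # path graph
       print(np.round(personalized_pagerank(A, seed=0), 3))
       # Mass decays with graph distance from the seed -- the localization
       # that makes these vectors useful as windows for a graph STFT.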

@article{fds322677,
   Author = {Tepper, M and Sapiro, G},
   Title = {Compressed Nonnegative Matrix Factorization Is Fast and
             Accurate},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {64},
   Number = {9},
   Pages = {2269-2283},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2016},
   Month = {May},
   url = {http://dx.doi.org/10.1109/TSP.2016.2516971},
   Abstract = {Nonnegative matrix factorization (NMF) has an established
             reputation as a useful data analysis technique in numerous
             applications. However, its usage in practical situations is
             undergoing challenges in recent years. The fundamental
             factor to this is the increasingly growing size of the
             datasets available and needed in the information sciences.
             To address this, in this work we propose to use structured
             random compression, that is, random projections that exploit
             the data structure, for two NMF variants: classical and
             separable. In separable NMF (SNMF), the left factors are a
             subset of the columns of the input matrix. We present
             suitable formulations for each problem, dealing with
             different representative algorithms within each one. We show
             that the resulting compressed techniques are faster than
             their uncompressed variants, vastly reduce memory demands,
             and do not encompass any significant deterioration in
             performance. The proposed structured random projections for
             SNMF allow to deal with arbitrarily shaped large matrices,
             beyond the standard limit of tall-and-skinny matrices,
             granting access to very efficient computations in this
             general setting. We accompany the algorithmic presentation
             with theoretical foundations and numerous and diverse
             examples, showing the suitability of the proposed
             approaches. Our implementations are publicly
             available.},
   Doi = {10.1109/TSP.2016.2516971},
   Key = {fds322677}
}
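
   Note: The structured random compression used above can be illustrated
   with a randomized range finder in the style of Halko et al.: a random
   sketch followed by a few power iterations yields an orthonormal basis
   Q, and the much smaller matrix Q^T X can stand in for X in the
   factorization updates. This Python sketch shows only that compression
   building block, not the full compressed-NMF algorithms of the paper.

       import numpy as np

       def randomized_range_finder(X, rank, n_power=2, oversample=10, seed=0):
           rng = np.random.default_rng(seed)
           Y = X @ rng.standard_normal((X.shape[1], rank + oversample))
           for _ in range(n_power):       # power iterations sharpen the
               Y = X @ (X.T @ Y)          # alignment with the top singular
           Q, _ = np.linalg.qr(Y)         # directions of X
           return Q                       # orthonormal basis for range(X)

       X = np.random.rand(2000, 500)      # nonnegative data matrix
       Q = randomized_range_finder(X, rank=20)
       X_small = Q.T @ X                  # 30 x 500 compressed matrix
       rel_err = np.linalg.norm(X - Q @ X_small) / np.linalg.norm(X)
       print(X_small.shape, "relative error: %.3f" % rel_err)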

@article{fds322678,
   Author = {Qiu, Q and Thompson, A and Calderbank, R and Sapiro,
             G},
   Title = {Data Representation Using the Weyl Transform},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {64},
   Number = {7},
   Pages = {1844-1853},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2016},
   Month = {April},
   url = {http://dx.doi.org/10.1109/TSP.2015.2505661},
   Abstract = {The Weyl transform is introduced as a rich framework for
             data representation. Transform coefficients are connected to
             the Walsh-Hadamard transform of multiscale autocorrelations,
             and different forms of dyadic periodicity in a signal are
             shown to appear as different features in its Weyl
             coefficients. The Weyl transform has a high degree of
             symmetry with respect to a large group of multiscale
             transformations, which allows compact yet discriminative
             representations to be obtained by pooling coefficients. The
             effectiveness of the Weyl transform is demonstrated through
             the example of textured image classification.},
   Doi = {10.1109/TSP.2015.2505661},
   Key = {fds322678}
}
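
The stated connection between the transform coefficients and the Walsh-Hadamard transform of autocorrelations can be probed in a few lines. This is only a loose illustration of that connection (a circular autocorrelation followed by a Walsh-Hadamard transform), not the full Weyl transform; the signal and its length are arbitrary.

    import numpy as np
    from scipy.linalg import hadamard

    def circular_autocorrelation(x):
        # Autocorrelation at all lags, computed via the FFT.
        X = np.fft.fft(x)
        return np.real(np.fft.ifft(X * np.conj(X)))

    n = 16                              # length must be a power of two
    x = np.sign(np.random.randn(n))     # toy binary-valued signal
    r = circular_autocorrelation(x)
    coeffs = hadamard(n) @ r / n        # WHT of the autocorrelation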

@article{fds322680,
   Author = {Chang, Z and Qiu, Q and Sapiro, G},
   Title = {Synthesis-based low-cost gaze analysis},
   Journal = {Communications in Computer and Information
             Science},
   Volume = {618},
   Pages = {95-100},
   Publisher = {Springer International Publishing},
   Year = {2016},
   Month = {January},
   ISBN = {9783319405414},
   url = {http://dx.doi.org/10.1007/978-3-319-40542-1_15},
   Abstract = {Gaze analysis has gained much popularity over the years due
             to its relevance in a wide array of applications, including
human-computer interaction, fatigue detection, and clinical
             mental health diagnosis. However, accurate gaze estimation
             from low resolution images outside of the lab (in the wild)
             still proves to be a challenging task. The new Intel
             low-cost RealSense 3D camera, capable of acquiring
             submillimeter resolution depth information, is currently
             available in laptops, and such technology is expected to
             become ubiquitous in other portable devices. In this paper,
we focus on low-cost, scalable, and real-time analysis of
             human gaze using this RealSense camera. We exploit the
             direct measurement of eye surface geometry captured by the
             RGB-D camera, and perform gaze estimation through novel
             synthesis-based training and testing. Furthermore, we
             synthesize different eye movement appearances using a linear
             approach. From each 3D eye training sample captured by the
             RealSense camera, we synthesize multiple novel 2D views by
             varying the view angle to simulate head motions expected at
             testing. We then learn from the synthesized 2D eye images a
             gaze regression model using regression forests. At testing,
             for each captured RGB-D eye image, we first repeat the same
             synthesis process. For each synthesized image, we estimate
             the gaze from our gaze regression model, and factor-out the
             associated camera/head motion. In this way, we obtain
             multiple gaze estimations for each RGB-D eye image, and the
             consensus is adopted. We show that this synthesis-based
             training and testing significantly improves the precision in
             gaze estimation, opening the door to true low-cost
             solutions.},
   Doi = {10.1007/978-3-319-40542-1_15},
   Key = {fds322680}
}

@article{fds322681,
   Author = {Lyzinski, V and Fishkind, DE and Fiori, M and Vogelstein, JT and Priebe,
             CE and Sapiro, G},
   Title = {Graph Matching: Relax at Your Own Risk.},
   Journal = {IEEE transactions on pattern analysis and machine
             intelligence},
   Volume = {38},
   Number = {1},
   Pages = {60-73},
   Year = {2016},
   Month = {January},
   url = {http://dx.doi.org/10.1109/tpami.2015.2424894},
Abstract = {Graph matching, the alignment of a pair of graphs to
             minimize their edge disagreements, has received widespread
             attention from both theoretical and applied communities over
             the past several decades, in fields including combinatorics,
             computer vision, and connectomics. This attention can be partially attributed
             to its computational difficulty. Although many heuristics
             have previously been proposed in the literature to
             approximately solve graph matching, very few have any
             theoretical support for their performance. A common
             technique is to relax the discrete problem to a continuous
             problem, therefore enabling practitioners to bring
             gradient-descent-type algorithms to bear. We prove that an
             indefinite relaxation (when solved exactly) almost always
             discovers the optimal permutation, while a common convex
             relaxation almost always fails to discover the optimal
             permutation. These theoretical results suggest that
             initializing the indefinite algorithm with the convex
             optimum might yield improved practical performance. Indeed,
             experimental results illuminate and corroborate these
             theoretical findings, demonstrating that excellent results
             are achieved in both benchmark and real data problems by
             amalgamating the two approaches.},
   Doi = {10.1109/tpami.2015.2424894},
   Key = {fds322681}
}
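
SciPy ships a Frank-Wolfe-style solver for the indefinite relaxation (the 'faq' method of scipy.optimize.quadratic_assignment, following the FAQ algorithm developed by some of the same authors), so the recovery claim is easy to try on a toy isomorphic pair. Graph size and edge density below are arbitrary, and FAQ remains a heuristic, so perfect recovery is typical here but not guaranteed.

    import numpy as np
    from scipy.optimize import quadratic_assignment

    rng = np.random.default_rng(0)
    n = 25
    A = np.triu((rng.random((n, n)) < 0.3).astype(float), 1)
    A = A + A.T                              # random undirected graph
    perm = rng.permutation(n)
    B = A[np.ix_(perm, perm)]                # isomorphic relabeled copy

    # 'faq' runs Frank-Wolfe on the indefinite relaxation and projects
    # the doubly stochastic iterate back to a permutation.
    res = quadratic_assignment(A, B, method='faq',
                               options={'maximize': True})
    mismatch = np.abs(A - B[np.ix_(res.col_ind, res.col_ind)]).sum() / 2
    print('edge disagreements after matching:', mismatch)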

@article{fds322213,
   Author = {Carpenter, KLH and Sprechmann, P and Calderbank, R and Sapiro, G and Egger, HL},
   Title = {Quantifying Risk for Anxiety Disorders in Preschool
             Children: A Machine Learning Approach.},
   Journal = {PLoS One},
   Volume = {11},
   Number = {11},
   Pages = {e0165524},
   Year = {2016},
   url = {http://dx.doi.org/10.1371/journal.pone.0165524},
   Abstract = {Early childhood anxiety disorders are common, impairing, and
             predictive of anxiety and mood disorders later in childhood.
             Epidemiological studies over the last decade find that the
             prevalence of impairing anxiety disorders in preschool
             children ranges from 0.3% to 6.5%. Yet, less than 15% of
             young children with an impairing anxiety disorder receive a
             mental health evaluation or treatment. One possible reason
             for the low rate of care for anxious preschoolers is the
             lack of affordable, timely, reliable and valid tools for
             identifying young children with clinically significant
             anxiety. Diagnostic interviews assessing psychopathology in
             young children require intensive training, take hours to
             administer and code, and are not available for use outside
             of research settings. The Preschool Age Psychiatric
             Assessment (PAPA) is a reliable and valid structured
             diagnostic parent-report interview for assessing
psychopathology, including anxiety disorders, in 2- to
             5-year-old children. In this paper, we apply machine-learning
             tools to already collected PAPA data from two large community
             studies to identify subsets of PAPA items that could be
             developed into an efficient, reliable, and valid screening
             tool to assess a young child's risk for an anxiety disorder.
             Using machine learning, we were able to decrease by an order
             of magnitude the number of items needed to identify a child
             who is at risk for an anxiety disorder with an accuracy of
             over 96% for both generalized anxiety disorder (GAD) and
             separation anxiety disorder (SAD). Additionally, rather than
             considering GAD or SAD as discrete/binary entities, we
             present a continuous risk score representing the child's
             risk of meeting criteria for GAD or SAD. Identification of a
             short question-set that assesses risk for an anxiety
             disorder could be a first step toward development and
             validation of a relatively short screening tool feasible for
             use in pediatric clinics and daycare/preschool
             settings.},
   Doi = {10.1371/journal.pone.0165524},
   Key = {fds322213}
}

@article{fds322682,
   Author = {Kim, J and Duchin, Y and Sapiro, G and Vitek, J and Harel,
             N},
   Title = {Clinical deep brain stimulation region prediction using
             regression forests from high-field MRI},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Volume = {2015-December},
   Pages = {2480-2484},
   Publisher = {IEEE},
   Year = {2015},
   Month = {December},
   ISBN = {9781479983391},
   url = {http://dx.doi.org/10.1109/ICIP.2015.7351248},
Abstract = {This paper presents a framework for predicting brain
             subcortical structures that are invisible on clinical
             low-field MRI, by learning detailed information from
             ultrahigh-field MR training data. Volumetric segmentation of
             Deep Brain Stimulation (DBS) structures within the Basal
             ganglia is a prerequisite process for reliable DBS surgery.
             While ultrahigh-field MR imaging (7 Tesla) allows direct
             visualization of DBS targeting structures, such
             ultrahigh-fields are not always clinically available, and
             therefore the relevant structures need to be predicted from
             the clinical data. We address the shape prediction problem
             with a regression forest, non-linearly mapping predictors to
             target structures with high confidence, exploiting
ultrahigh-field MR training data. As an application, we
             consider prediction of the subthalamic nucleus (STN), a
             crucial DBS target. Experimental results on Parkinson's patients
             validate that the proposed approach enables reliable
             estimation of the STN from clinical 1.5T
             MRI.},
   Doi = {10.1109/ICIP.2015.7351248},
   Key = {fds322682}
}
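
As a schematic of the regression step described here, with entirely hypothetical stand-in features since the MRI data is not public, one can fit a random forest mapping predictor-structure descriptors to target-structure shape coordinates with scikit-learn (this mirrors the general recipe, not the paper's exact features or shape parameterization).

    import numpy as np
    from sklearn.ensemble import RandomForestRegressor

    rng = np.random.default_rng(0)
    # Hypothetical stand-ins: per-subject descriptors of visible
    # predictor structures (X) and flattened target-structure boundary
    # coordinates (Y), as would come from 7T training scans.
    X = rng.standard_normal((40, 30))
    Y = rng.standard_normal((40, 2 * 12))    # 12 boundary points in 2D

    forest = RandomForestRegressor(n_estimators=200,
                                   random_state=0).fit(X, Y)
    shape = forest.predict(rng.standard_normal((1, 30))).reshape(-1, 2)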

@article{fds322683,
   Author = {Tepper, M and Newson, A and Sprechmann, P and Sapiro,
             G},
   Title = {Multi-temporal foreground detection in videos},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Volume = {2015-December},
   Pages = {4599-4603},
   Year = {2015},
   Month = {December},
   ISBN = {9781479983391},
   url = {http://dx.doi.org/10.1109/ICIP.2015.7351678},
   Abstract = {A common task in video processing is the binary separation
             of a video's content into either background or moving
             foreground. However, many situations require a foreground
             analysis with a finer temporal granularity, in particular
             for objects or people which remain immobile for a certain
             period of time. We propose an efficient method which detects
             foreground at different timescales, by exploiting the
             desirable theoretical and practical properties of Robust
             Principal Component Analysis. Our algorithm can be used in a
             variety of scenarios such as detecting people who have
             fallen in a video, or analysing the fluidity of road
             traffic, while avoiding costly computations needed for
nearest-neighbour searches or optical flow analysis.
             Finally, our algorithm has the useful ability to perform
             motion analysis without explicitly requiring computationally
             expensive motion estimation.},
   Doi = {10.1109/ICIP.2015.7351678},
   Key = {fds322683}
}
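
The Robust PCA decomposition this method builds on splits a pixels-by-frames matrix into a low-rank background plus a sparse foreground. A minimal inexact-ALM sketch of that standard decomposition (not the paper's multi-timescale extension; default lam and mu choices follow common practice):

    import numpy as np

    def rpca(M, lam=None, mu=None, n_iter=200):
        # Inexact augmented Lagrangian for M = L (low rank) + S (sparse).
        m, n = M.shape
        lam = lam if lam is not None else 1.0 / np.sqrt(max(m, n))
        mu = mu if mu is not None else 0.25 * m * n / (np.abs(M).sum() + 1e-12)
        shrink = lambda X, t: np.sign(X) * np.maximum(np.abs(X) - t, 0.0)
        L = np.zeros_like(M); S = np.zeros_like(M); Y = np.zeros_like(M)
        for _ in range(n_iter):
            U, s, Vt = np.linalg.svd(M - S + Y / mu, full_matrices=False)
            L = (U * shrink(s, 1.0 / mu)) @ Vt    # singular-value thresholding
            S = shrink(M - L + Y / mu, lam / mu)  # entrywise soft threshold
            Y = Y + mu * (M - L - S)
        return L, S

    # For video, stack vectorized frames as columns: L recovers the
    # static background and S highlights the moving foreground.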

@article{fds322684,
   Author = {Hashemi, J and Qiu, Q and Sapiro, G},
   Title = {Cross-modality pose-invariant facial expression},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Volume = {2015-December},
   Pages = {4007-4011},
   Publisher = {IEEE},
   Year = {2015},
   Month = {December},
   ISBN = {9781479983391},
   url = {http://dx.doi.org/10.1109/ICIP.2015.7351558},
   Abstract = {In this work, we present a dictionary learning based
             framework for robust, cross-modality, and pose-invariant
             facial expression recognition. The proposed framework first
             learns a dictionary that i) contains both 3D shape and
             morphological information as well as 2D texture and
             geometric information, ii) enforces coherence across both 2D
             and 3D modalities and different poses, and iii) is robust in
             the sense that a learned dictionary can be applied across
multiple facial expression datasets. We demonstrate that, by
             enforcing domain-specific block structures on the
             dictionary, we can transform a given test expression sample
             across different domains for tasks such as pose
             alignment. We validate our approach on the task of
             pose-invariant facial expression recognition on the standard
BU3D-FE and MultiPie datasets, achieving state-of-the-art
             performance.},
   Doi = {10.1109/ICIP.2015.7351558},
   Key = {fds322684}
}

@article{fds367692,
   Author = {Delbracio, M and Sapiro, G},
   Title = {Hand-Held Video Deblurring Via Efficient Fourier
             Aggregation},
   Journal = {IEEE Transactions on Computational Imaging},
   Volume = {1},
   Number = {4},
   Pages = {270-283},
   Year = {2015},
   Month = {December},
   url = {http://dx.doi.org/10.1109/TCI.2015.2501245},
   Abstract = {Videos captured with hand-held cameras often suffer from a
             significant amount of blur, mainly caused by the inevitable
             natural tremor of the photographer's hand. In this work, we
             present an algorithm that removes blur due to camera shake
             by combining information in the Fourier domain from nearby
             frames in a video. The dynamic nature of typical videos with
             the presence of multiple moving objects and occlusions makes
             this problem of camera shake removal extremely challenging,
             in particular when low complexity is needed. Given an input
             video frame, we first create a consistent registered version
             of temporally adjacent frames. Then, the set of consistently
             registered frames is block-wise fused in the Fourier domain
             with weights depending on the Fourier spectrum magnitude.
The method is motivated by the physiological fact that
             camera shake blur has a random nature; therefore, nearby
             video frames are generally blurred differently. Experiments
             with numerous videos recorded in the wild, along with
             extensive comparisons, show that the proposed algorithm
             achieves state-of-the-art results while at the same time
             being much faster than its competitors.},
   Doi = {10.1109/TCI.2015.2501245},
   Key = {fds367692}
}

@article{fds264703,
   Author = {Delbracio, M and Sapiro, G},
   Title = {Removing Camera Shake via Weighted Fourier Burst
             Accumulation.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {24},
   Number = {11},
   Pages = {3293-3307},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2015},
   Month = {November},
   ISSN = {1057-7149},
   url = {http://dx.doi.org/10.1109/tip.2015.2442914},
   Abstract = {Numerous recent approaches attempt to remove image blur due
             to camera shake, either with one or multiple input images,
             by explicitly solving an inverse and inherently ill-posed
             deconvolution problem. If the photographer takes a burst of
             images, a modality available in virtually all modern digital
             cameras, we show that it is possible to combine them to get
             a clean sharp version. This is done without explicitly
             solving any blur estimation and subsequent inverse problem.
             The proposed algorithm is strikingly simple: it performs a
             weighted average in the Fourier domain, with weights
             depending on the Fourier spectrum magnitude. The method can
             be seen as a generalization of the align and average
             procedure, with a weighted average, motivated by hand-shake
             physiology and theoretically supported, taking place in the
             Fourier domain. The method's rationale is that camera shake
             has a random nature, and therefore, each image in the burst
             is generally blurred differently. Experiments with real
             camera data, and extensive comparisons, show that the
             proposed Fourier burst accumulation algorithm achieves
state-of-the-art results an order of magnitude faster, and is
             simple enough for on-board implementation on camera phones.
             Finally, we also present experiments in real high dynamic
             range (HDR) scenes, showing how the method can be
             straightforwardly extended to HDR photography.},
   Doi = {10.1109/tip.2015.2442914},
   Key = {fds264703}
}
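
The aggregation itself fits in a few lines: for registered frames, each Fourier coefficient is averaged with a weight proportional to a power p of its magnitude, so the least-blurred frame dominates at each frequency. A minimal sketch of that weighted average (function name is mine; registration of the frames is assumed already done, and a p around 11 is in the range reported for this approach):

    import numpy as np

    def fourier_burst_accumulation(frames, p=11):
        # frames: iterable of registered grayscale images, equal size.
        F = np.stack([np.fft.fft2(f) for f in frames])    # (k, H, W)
        w = np.abs(F) ** p
        w /= w.sum(axis=0, keepdims=True) + 1e-12         # per-frequency weights
        return np.real(np.fft.ifft2((w * F).sum(axis=0))) # weighted average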

@article{fds322685,
   Author = {Delbracio, M and Sapiro, G},
   Title = {Burst deblurring: Removing camera shake through fourier
             burst accumulation},
   Journal = {Proceedings of the IEEE Computer Society Conference on
             Computer Vision and Pattern Recognition},
   Volume = {07-12-June-2015},
   Pages = {2385-2393},
   Publisher = {IEEE},
   Year = {2015},
   Month = {October},
   ISBN = {9781467369640},
   url = {http://dx.doi.org/10.1109/CVPR.2015.7298852},
   Abstract = {Numerous recent approaches attempt to remove image blur due
             to camera shake, either with one or multiple input images,
             by explicitly solving an inverse and inherently ill-posed
             deconvolution problem. If the photographer takes a burst of
             images, a modality available in virtually all modern digital
             cameras, we show that it is possible to combine them to get
             a clean sharp version. This is done without explicitly
             solving any blur estimation and subsequent inverse problem.
             The proposed algorithm is strikingly simple: it performs a
             weighted average in the Fourier domain, with weights
             depending on the Fourier spectrum magnitude. The method's
             rationale is that camera shake has a random nature and
             therefore each image in the burst is generally blurred
             differently. Experiments with real camera data show that the
             proposed Fourier Burst Accumulation algorithm achieves
state-of-the-art results an order of magnitude faster, and is
             simple enough for on-board implementation on camera
             phones.},
   Doi = {10.1109/CVPR.2015.7298852},
   Key = {fds322685}
}

@article{fds304066,
   Author = {Sprechmann, P and Bronstein, AM and Sapiro, G},
   Title = {Learning Efficient Sparse and Low Rank Models.},
   Journal = {IEEE transactions on pattern analysis and machine
             intelligence},
   Volume = {37},
   Number = {9},
   Pages = {1821-1833},
   Year = {2015},
   Month = {September},
   url = {http://arxiv.org/abs/1212.3631v1},
   Abstract = {Parsimony, including sparsity and low rank, has been shown
             to successfully model data in numerous machine learning and
             signal processing tasks. Traditionally, such modeling
             approaches rely on an iterative algorithm that minimizes an
             objective function with parsimony-promoting terms. The
             inherently sequential structure and data-dependent
             complexity and latency of iterative optimization constitute
             a major limitation in many applications requiring real-time
             performance or involving large-scale data. Another
             limitation encountered by these modeling techniques is the
             difficulty of their inclusion in discriminative learning
             scenarios. In this work, we propose to move the emphasis
             from the model to the pursuit algorithm, and develop a
             process-centric view of parsimonious modeling, in which a
             learned deterministic fixed-complexity pursuit process is
             used in lieu of iterative optimization. We show a principled
             way to construct learnable pursuit process architectures for
             structured sparse and robust low rank models, derived from
             the iteration of proximal descent algorithms. These
             architectures learn to approximate the exact parsimonious
             representation at a fraction of the complexity of the
standard optimization methods. We also show that appropriate
             training regimes allow parsimonious models to be naturally
             extended to discriminative settings. State-of-the-art results
             are demonstrated on several challenging problems in image
             and audio processing with several orders of magnitude
             speed-up compared to the exact optimization
             algorithms.},
   Doi = {10.1109/tpami.2015.2392779},
   Key = {fds304066}
}
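
The "learned pursuit" idea starts from writing a fixed number of proximal-gradient (ISTA) iterations for sparse coding as a feed-forward network whose matrices can then be trained (as in LISTA). A sketch of the untrained starting point, with dictionary D and signal y assumed given and the layer count and threshold as illustrative parameters:

    import numpy as np

    def soft(x, t):
        return np.sign(x) * np.maximum(np.abs(x) - t, 0.0)

    def truncated_ista(D, y, n_layers=16, lam=0.1):
        # Each "layer" is one ISTA step z <- soft(S z + W y); learned
        # pursuit replaces the fixed W, S, and thresholds with trained
        # parameters of the same shapes.
        L = np.linalg.norm(D, 2) ** 2          # Lipschitz constant of D^T D
        W = D.T / L
        S = np.eye(D.shape[1]) - (D.T @ D) / L
        z = soft(W @ y, lam / L)
        for _ in range(n_layers - 1):
            z = soft(S @ z + W @ y, lam / L)
        return z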

@article{fds322686,
   Author = {Huang, J and Qiu, Q and Calderbank, R and Rodrigues, M and Sapiro,
             G},
   Title = {Alignment with intra-class structure can improve
             classification},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Volume = {2015-August},
   Pages = {1921-1925},
   Publisher = {IEEE},
   Year = {2015},
   Month = {August},
   ISBN = {9781467369978},
   url = {http://dx.doi.org/10.1109/ICASSP.2015.7178305},
   Abstract = {High dimensional data is modeled using low-rank subspaces,
             and the probability of misclassification is expressed in
             terms of the principal angles between subspaces. The form
             taken by this expression motivates the design of a new
             feature extraction method that enlarges inter-class
             separation, while preserving intra-class structure. The
             method can be tuned to emphasize different features shared
             by members within the same class. Classification performance
is compared to that of state-of-the-art methods on synthetic
             data and on a real face database. The probability of
             misclassification is decreased when intra-class structure is
             taken into account.},
   Doi = {10.1109/ICASSP.2015.7178305},
   Key = {fds322686}
}
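
The quantity driving this analysis, the principal angles between low-rank subspaces, is directly available in SciPy; a toy check with two random 5-dimensional subspaces of R^50 (dimensions are arbitrary):

    import numpy as np
    from scipy.linalg import subspace_angles

    rng = np.random.default_rng(0)
    U = np.linalg.qr(rng.standard_normal((50, 5)))[0]  # orthonormal basis
    V = np.linalg.qr(rng.standard_normal((50, 5)))[0]
    theta = subspace_angles(U, V)   # principal angles, largest first
    # Smaller angles mean the two subspaces (classes) are harder to
    # tell apart, raising the probability of misclassification.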

@article{fds291305,
   Author = {Kim, J and Duchin, Y and Sapiro, G and Vitek, J and Harel,
             N},
   Title = {Clinical subthalamic nucleus prediction from high-field
             brain MRI},
   Journal = {Proceedings - International Symposium on Biomedical
             Imaging},
   Volume = {2015-July},
   Pages = {1264-1267},
   Publisher = {IEEE},
   Year = {2015},
   Month = {July},
   ISBN = {9781479923748},
   ISSN = {1945-7928},
   url = {http://dx.doi.org/10.1109/ISBI.2015.7164104},
   Abstract = {The subthalamic nucleus (STN) within the sub-cortical region
             of the Basal ganglia is a crucial targeting structure for
             Parkinson's Deep brain stimulation (DBS) surgery. Volumetric
segmentation of such a small and complex structure, which is
             elusive in clinical MRI protocols, is therefore a
             prerequisite for reliable direct DBS targeting.
             While direct visualization of the STN is facilitated with
             advanced ultrahigh-field MR imaging (7 Tesla), such high
             fields are not always clinically available. In this paper,
             we aim at the automatic prediction of the STN region on
             clinical low-field MRI, exploiting dependencies between the
             STN and its adjacent structures, learned from
             ultrahigh-field MRI. We present a framework based on a
             statistical shape model to learn such shape relationship on
             high quality MR data sets. This allows for an accurate
             prediction and visualization of the STN structure, given
             detectable predictors on the low-field MRI. Experimental
             results on Parkinson's patients demonstrate that the
             proposed approach enables accurate estimation of the STN on
             clinical 1.5T MRI.},
   Doi = {10.1109/ISBI.2015.7164104},
   Key = {fds291305}
}

@article{fds264698,
   Author = {Lucas, JE and Sapiro, G},
   Title = {Cancer: What's luck got to do with it?},
   Journal = {Significance},
   Volume = {12},
   Number = {2},
   Pages = {40-42},
   Publisher = {WILEY},
   Year = {2015},
   Month = {April},
   ISSN = {1740-9705},
   url = {http://dx.doi.org/10.1111/j.1740-9713.2015.00816.x},
   Abstract = {Recent press reports would have you believe that cancer
             develops randomly, and healthy living makes little
             difference. But that is a gross misinterpretation of a
             recent scientific paper, as Joseph E. Lucas and Guillermo
             Sapiro explain.},
   Doi = {10.1111/j.1740-9713.2015.00816.x},
   Key = {fds264698}
}

@article{fds346295,
   Author = {Fiori, M and Sapiro, G},
   Title = {On spectral properties for graph matching and graph
             isomorphism problems},
   Journal = {Information and Inference},
   Volume = {4},
   Number = {1},
   Pages = {63-76},
   Year = {2015},
   Month = {March},
   url = {http://dx.doi.org/10.1093/imaiai/iav002},
   Abstract = {Problems related to graph matching and isomorphisms are very
             important both from a theoretical and practical perspective,
             with applications ranging from image and video analysis to
             biological and biomedical problems. The graph matching
             problem is challenging from a computational point of view,
             and therefore different relaxations are commonly used.
Although common relaxation techniques tend to work well for
             matching perfectly isomorphic graphs, it is not yet fully
             understood under which conditions the relaxed problem is
             guaranteed to obtain the correct answer. In this paper, we
             prove that the graph matching problem and its most common
             convex relaxation, where the matching domain of permutation
             matrices is substituted with its convex hull of
             doubly-stochastic matrices, are equivalent for a certain
             class of graphs, such equivalence being based on spectral
             properties of the corresponding adjacency matrices. We also
             derive results about the automorphism group of a graph, and
             provide fundamental spectral properties of the adjacency
             matrix.},
   Doi = {10.1093/imaiai/iav002},
   Key = {fds346295}
}

@article{fds322679,
   Author = {Huang, J and Qiu, Q and Calderbank, R and Sapiro,
             G},
   Title = {Geometry-aware deep transform},
   Journal = {Proceedings of the IEEE International Conference on Computer
             Vision},
Volume = {2015 International Conference on Computer Vision},
   Pages = {4139-4147},
   Publisher = {IEEE},
   Year = {2015},
   Month = {February},
   ISBN = {9781467383912},
   url = {http://dx.doi.org/10.1109/ICCV.2015.471},
   Abstract = {Many recent efforts have been devoted to designing
             sophisticated deep learning structures, obtaining
             revolutionary results on benchmark datasets. The success of
             these deep learning methods mostly relies on an enormous
             volume of labeled training samples to learn a huge number of
             parameters in a network, therefore, understanding the
             generalization ability of a learned deep network cannot be
             overlooked, especially when restricted to a small training
             set, which is the case for many applications. In this paper,
             we propose a novel deep learning objective formulation that
             unifies both the classification and metric learning
             criteria. We then introduce a geometry-aware deep transform
             to enable a non-linear discriminative and robust feature
             transform, which shows competitive performance on small
             training sets for both synthetic and real-world data. We
further support the proposed framework with a formal
             (K, ε)-robustness analysis.},
   Doi = {10.1109/ICCV.2015.471},
   Key = {fds322679}
}

@article{fds304057,
   Author = {Qiu, Q and Sapiro, G},
   Title = {Learning transformations for clustering and
             classification},
   Journal = {Journal of Machine Learning Research},
   Volume = {16},
   Pages = {187-225},
   Year = {2015},
   Month = {February},
   url = {http://arxiv.org/abs/1309.2074v2},
   Abstract = {A low-rank transformation learning framework for subspace
             clustering and classification is proposed here. Many
             high-dimensional data, such as face images and motion
             sequences, approximately lie in a union of low-dimensional
             subspaces. The corresponding subspace clustering problem has
             been extensively studied in the literature to partition such
             high-dimensional data into clusters corresponding to their
             underlying low-dimensional subspaces. Low-dimensional
             intrinsic structures are often violated for real-world
             observations, as they can be corrupted by errors or deviate
             from ideal models. We propose to address this by learning a
linear transformation on subspaces, using the nuclear norm as
             the modeling and optimization criterion. The learned linear
             transformation restores a low-rank structure for data from
             the same subspace, and, at the same time, forces a maximally
             separated structure for data from different subspaces. In
             this way, we reduce variations within the subspaces, and
             increase separation between the subspaces for a more robust
             subspace clustering. This proposed learned robust subspace
             clustering framework significantly enhances the performance
             of existing subspace clustering methods. Basic theoretical
             results presented here help to further support the
             underlying framework. To exploit the low-rank structures of
             the transformed subspaces, we further introduce a fast
             subspace clustering technique, which efficiently combines
             robust PCA with sparse modeling. When class labels are
             present at the training stage, we show this low-rank
             transformation framework also significantly enhances
             classification performance. Extensive experiments using
             public data sets are presented, showing that the proposed
             approach significantly outperforms state-of-the-art methods
             for subspace clustering and classification. The learned low
             cost transform is also applicable to other classification
             frameworks.},
   Key = {fds304057}
}
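
The core criterion can be stated in a few lines: make each class's transformed data low-rank while keeping the transformed union high-rank. Below is a sketch of evaluating that nuclear-norm objective as read from the abstract above (my reading and my function names; the full method also learns T under a normalization constraint, which is omitted here).

    import numpy as np

    def nuclear_norm(M):
        return np.linalg.svd(M, compute_uv=False).sum()

    def transform_objective(T, class_blocks):
        # Sum of per-class nuclear norms (encourage low rank within each
        # subspace) minus the nuclear norm of all transformed data
        # (force separation between subspaces).
        within = sum(nuclear_norm(T @ X) for X in class_blocks)
        total = nuclear_norm(T @ np.hstack(class_blocks))
        return within - total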

@article{fds264706,
   Author = {Yang, J and Liao, X and Yuan, X and Llull, P and Brady, DJ and Sapiro, G and Carin, L},
   Title = {Compressive sensing by learning a Gaussian mixture model
             from measurements.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {24},
   Number = {1},
   Pages = {106-119},
   Year = {2015},
   Month = {January},
   ISSN = {1057-7149},
   url = {http://dx.doi.org/10.1109/tip.2014.2365720},
   Abstract = {Compressive sensing of signals drawn from a Gaussian mixture
             model (GMM) admits closed-form minimum mean squared error
             reconstruction from incomplete linear measurements. An
             accurate GMM signal model is usually not available a priori,
             because it is difficult to obtain training signals that
             match the statistics of the signals being sensed. We propose
             to solve that problem by learning the signal model in situ,
             based directly on the compressive measurements of the
             signals, without resorting to other signals to train a
             model. A key feature of our method is that the signals being
             sensed are treated as random variables and are integrated
             out in the likelihood. We derive a maximum marginal
             likelihood estimator (MMLE) that maximizes the likelihood of
             the GMM of the underlying signals given only their linear
             compressive measurements. We extend the MMLE to a GMM with
             dominantly low-rank covariance matrices, to gain
             computational speedup. We report extensive experimental
             results on image inpainting, compressive sensing of
             high-speed video, and compressive hyperspectral imaging (the
             latter two based on real compressive cameras). The results
             demonstrate that the proposed methods outperform
             state-of-the-art methods by significant margins.},
   Doi = {10.1109/tip.2014.2365720},
   Key = {fds264706}
}
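
The closed-form reconstruction mentioned in the first sentence of the abstract is ordinary Gaussian conditioning applied per mixture component, weighted by the component posteriors. A compact sketch for y = Phi x + noise (function name and noise level are mine):

    import numpy as np
    from scipy.stats import multivariate_normal

    def gmm_mmse(y, Phi, weights, means, covs, noise_var=1e-4):
        # Each component gives a Gaussian posterior for x; the MMSE
        # estimate is the responsibility-weighted posterior mean.
        Iy = noise_var * np.eye(len(y))
        post_means, resp = [], []
        for w, mu, C in zip(weights, means, covs):
            Sy = Phi @ C @ Phi.T + Iy                 # marginal cov of y
            G = C @ Phi.T @ np.linalg.inv(Sy)         # per-component gain
            post_means.append(mu + G @ (y - Phi @ mu))
            resp.append(w * multivariate_normal.pdf(y, mean=Phi @ mu, cov=Sy))
        resp = np.asarray(resp) / np.sum(resp)
        return sum(r * m for r, m in zip(resp, post_means))

    # Toy usage: a 2-component GMM in R^4 observed via 2 measurements.
    rng = np.random.default_rng(0)
    Phi = rng.standard_normal((2, 4))
    x_hat = gmm_mmse(Phi @ np.ones(4), Phi, [0.5, 0.5],
                     [np.zeros(4), np.ones(4)],
                     [np.eye(4), 0.5 * np.eye(4)])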

@article{fds345824,
   Author = {Giryes, R and Sapiro, G and Bronstein, AM},
   Title = {On the stability of deep networks},
   Journal = {3rd International Conference on Learning Representations,
             ICLR 2015 - Workshop Track Proceedings},
   Year = {2015},
   Month = {January},
Abstract = {In this work we study the properties of deep
             neural networks (DNN) with
             random weights. We formally prove that these networks
             perform a distance-preserving embedding of the data. Based
             on this we then draw conclusions on the size of the training
             data and the networks’ structure. A longer version of this
             paper with more results and details can be found in (Giryes
             et al., 2015). In particular, we formally prove in (Giryes
             et al., 2015) that DNN with random Gaussian weights perform
             a distance-preserving embedding of the data, with a special
             treatment for in-class and out-of-class data.},
   Key = {fds345824}
}
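
An empirical probe of the claim is easy to run: push a point cloud through a few random-Gaussian ReLU layers and compare pairwise distances before and after. Widths, depth, and the He-style weight scaling below are arbitrary illustrative choices; the paper's theory concerns suitably scaled random weights.

    import numpy as np

    rng = np.random.default_rng(0)

    def pairwise_dist(Z):
        g = (Z * Z).sum(axis=1)
        return np.sqrt(np.maximum(g[:, None] + g[None, :] - 2 * Z @ Z.T, 0.0))

    X = rng.standard_normal((200, 64))
    H = X
    for _ in range(4):                    # four random ReLU layers
        W = rng.standard_normal((H.shape[1], 2048)) * np.sqrt(2.0 / H.shape[1])
        H = np.maximum(H @ W, 0.0)

    mask = ~np.eye(len(X), dtype=bool)
    ratio = pairwise_dist(H)[mask] / pairwise_dist(X)[mask]
    print('distance ratio spread:', ratio.min(), ratio.max())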

@article{fds335974,
   Author = {Llull, P and Yuan, X and Liao, X and Yang, J and Kittle, D and Carin, L and Sapiro, G and Brady, DJ},
   Title = {Temporal compressive sensing for video},
   Pages = {41-74},
   Publisher = {Springer International Publishing},
   Year = {2015},
   Month = {January},
   ISBN = {9783319160412},
   url = {http://dx.doi.org/10.1007/978-3-319-16042-9_2},
   Abstract = {Video camera architects must design cameras capable of
             high-quality, dynamic event capture, while adhering to power
             and communications constraints. Though modern imagers are
             capable of both simultaneous spatial and temporal
             resolutions at micrometer and microsecond scales, the power
             required to sample at these rates is undesirable. The field
             of compressive sensing (CS) has recently suggested a
             solution to this design challenge. By exploiting
             physical-layer compression strategies, one may overlay the
             original scene with a coding sequence to sample at
             sub-Nyquist rates with virtually no additional power
             requirement. The underlying scene may be later estimated
             without significant loss of fidelity. In this chapter, we
             cover a variety of such strategies taken to improve an
             imager’s temporal resolution. Highlighting a new low-power
             acquisition paradigm, we show how a video sequence of high
             temporal resolution may be reconstructed from a single video
             frame taken with a low-framerate camera.},
   Doi = {10.1007/978-3-319-16042-9_2},
   Key = {fds335974}
}
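
The acquisition model underlying this chapter can be written in one line: a fast sequence of frames is modulated by per-frame binary codes and summed into a single captured image. A toy sketch of that forward model only (sizes and codes are arbitrary; the reconstruction, e.g. the GMM inversion of the entries above, is the hard part):

    import numpy as np

    rng = np.random.default_rng(0)
    T, H, W = 8, 64, 64
    scene = rng.random((T, H, W))                        # fast scene (toy)
    codes = (rng.random((T, H, W)) < 0.5).astype(float)  # per-frame masks
    measurement = (codes * scene).sum(axis=0)            # one captured frame
    # Recovering `scene` from `measurement` and `codes` is the
    # compressive video reconstruction problem.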

@article{fds322688,
   Author = {Kim, J and Duchin, Y and Kim, H and Vitek, J and Harel, N and Sapiro,
             G},
   Title = {Robust prediction of clinical deep brain stimulation target
             structures via the estimation of influential high-field MR
             atlases},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {9350},
   Pages = {587-594},
   Publisher = {Springer International Publishing},
   Year = {2015},
   Month = {January},
   ISBN = {9783319245706},
   url = {http://dx.doi.org/10.1007/978-3-319-24571-3_70},
   Abstract = {This work introduces a robust framework for predicting Deep
             Brain Stimulation (DBS) target structures which are not
             identifiable on standard clinical MRI. While recent
             high-field MR imaging allows clear visualization of DBS
             target structures, such high-fields are not clinically
             available, and therefore DBS targeting needs to be performed
             on the standard clinical low contrast data. We first learn
             via regression models the shape relationships between DBS
             targets and their potential predictors from high-field (7
             Tesla) MR training sets. A bagging procedure is utilized in
             the regression model, reducing the variability of learned
             dependencies. Then, given manually or automatically detected
             predictors on the clinical patient data, the target
             structure is predicted using the learned high quality
             information. Moreover, we derive a robust way to properly
             weight different training subsets, yielding higher accuracy
             when using an ensemble of predictions. The subthalamic
             nucleus (STN), the most common DBS target for Parkinson’s
             disease, is used to exemplify within our framework.
             Experimental validation from Parkinson’s patients shows
             that the proposed approach enables reliable prediction of
             the STN from the clinical 1.5T MR data.},
   Doi = {10.1007/978-3-319-24571-3_70},
   Key = {fds322688}
}

@article{fds322689,
   Author = {Huang, J and Qiu, Q and Sapiro, G and Calderbank,
             R},
   Title = {Discriminative robust transformation learning},
   Journal = {Advances in Neural Information Processing
             Systems},
   Volume = {2015-January},
   Pages = {1333-1341},
   Year = {2015},
   Month = {January},
   Abstract = {This paper proposes a framework for learning features that
             are robust to data variation, which is particularly
             important when only a limited number of training samples are
             available. The framework makes it possible to tradeoff the
             discriminative value of learned features against the
             generalization error of the learning algorithm. Robustness
             is achieved by encouraging the transform that maps data to
             features to be a local isometry. This geometric property is
shown to improve (K, ε)-robustness, thereby providing
             theoretical justification for reductions in generalization
             error observed in experiments. The proposed optimization
             framework is used to train standard learning algorithms such
             as deep neural networks. Experimental results obtained on
             benchmark datasets, such as labeled faces in the wild,
             demonstrate the value of being able to balance
             discrimination and robustness.},
   Key = {fds322689}
}

@article{fds322690,
   Author = {Tepper, M and Sapiro, G},
   Title = {From local to global communities in large networks through
             consensus},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {9423},
   Pages = {659-666},
   Publisher = {Springer International Publishing},
   Year = {2015},
   Month = {January},
   ISBN = {9783319257501},
   url = {http://dx.doi.org/10.1007/978-3-319-25751-8_79},
   Abstract = {Given a universe of local communities of a large network, we
             aim at identifying the meaningful and consistent communities
             in it. We address this from a new perspective as the process
             of obtaining consensual community detections and formalize
             it as a bi-clustering problem. We obtain the global
             community structure of the given network without running
             expensive global community detection algorithms. The
             proposed mathematical characterization of the consensus
             problem and a new biclustering algorithm to solve it render
             the problem tractable for large networks. The approach is
             successfully validated in experiments with synthetic and
             large real-world networks, outperforming other
             state-ofthe-art alternatives in terms of speed and results
             quality.},
   Doi = {10.1007/978-3-319-25751-8_79},
   Key = {fds322690}
}

@article{fds345822,
   Author = {Qiu, Q and Sapiro, G and Bronstein, A},
   Title = {Random forests can hash},
   Journal = {3rd International Conference on Learning Representations,
             ICLR 2015 - Workshop Track Proceedings},
   Year = {2015},
   Month = {January},
Abstract = {Hash codes are a very efficient data representation,
             needed to cope with the ever-growing amounts of data. We
             introduce a random
             forest semantic hashing scheme with information-theoretic
             code aggregation, showing for the first time how random
             forest, a technique that together with deep learning have
             shown spectacular results in classification, can also be
             extended to large-scale retrieval. Traditional random forest
             fails to enforce the consistency of hashes generated from
             each tree for the same class data, i.e., to preserve the
             underlying similarity, and it also lacks a principled way
             for code aggregation across trees. We start with a simple
             hashing scheme, where independently trained random trees in
a forest are acting as hashing functions. We then propose a
             subspace model as the splitting function, and show that it
             enforces the hash consistency in a tree for data from the
             same class. We also introduce an information-theoretic
             approach for aggregating codes of individual trees into a
             single hash code, producing a near-optimal unique hash for
             each class. Experiments on large-scale public datasets are
             presented, showing that the proposed approach significantly
             outperforms state-of-the-art hashing methods for retrieval
             tasks.},
   Key = {fds345822}
}
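
The "simple hashing scheme" of the abstract, independently trained random trees acting as hash functions, resembles what scikit-learn exposes as RandomTreesEmbedding (one-hot encoded leaf indices). Note this is only the starting point, without the paper's subspace splitting functions or information-theoretic code aggregation; data and forest sizes are arbitrary.

    import numpy as np
    from sklearn.ensemble import RandomTreesEmbedding

    X = np.random.randn(500, 32)
    hasher = RandomTreesEmbedding(n_estimators=8, max_depth=6,
                                  random_state=0)
    codes = hasher.fit_transform(X)  # sparse binary, one block per tree
    # Each row concatenates one-hot leaf indicators across the 8 trees,
    # i.e., an unaggregated forest hash code for that sample.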

@article{fds345823,
   Author = {Qiu, Q and Thompson, A and Calderbank, R and Sapiro,
             G},
   Title = {Data representation using the Weyl transform},
   Journal = {3rd International Conference on Learning Representations,
             ICLR 2015 - Workshop Track Proceedings},
   Year = {2015},
   Month = {January},
Abstract = {The Weyl transform is introduced as a rich framework
             for data
             representation. Transform coefficients are connected to the
             Walsh-Hadamard transform of multiscale autocorrelations, and
             different forms of dyadic periodicity in a signal are shown
             to appear as different features in its Weyl coefficients.
             The Weyl transform has a high degree of symmetry with
             respect to a large group of multiscale transformations,
             which allows compact yet discriminative representations to
             be obtained by pooling coefficients. The effectiveness of
             the Weyl transform is demonstrated through the example of
             textured image classification.},
   Key = {fds345823}
}

@article{fds330346,
   Author = {Hashemi, J and Campbell, K and Carpenter, K and Harris, A and Qiu, Q and Tepper, M and Espinosa, S and Schaich Borg and J and Marsan, S and Calderbank, R and Baker, J and Egger, HL and Dawson, G and Sapiro,
             G},
   Title = {A scalable app for measuring autism risk behaviors in young
             children: A technical validity and feasibility
             study},
   Journal = {Proceedings of the 5th EAI International Conference on
             Wireless Mobile Communication and Healthcare},
   Pages = {23-27},
   Publisher = {ICST},
   Year = {2015},
   url = {http://dx.doi.org/10.4108/eai.14-10-2015.2261939},
Abstract = {In spite of recent advances in the genetics and neuroscience
             of early childhood mental health, behavioral observation is
             still the gold standard in screening, diagnosis, and outcome
             assessment. Unfortunately, clinical observation is often
             subjective, needs significant rater training, does not
             capture data from participants in their natural environment,
             and is not scalable for use in large populations or for
             longitudinal monitoring. To address these challenges, we
             developed and tested a self-contained app designed to
             measure toddlers' social communication behaviors in a
             primary care, school, or home setting. Twenty 16- to
             30-month-old children with and without autism participated
             in this study. Toddlers watched the developmentally
             appropriate visual stimuli on an iPad in a pediatric clinic
             and in our lab while the iPad camera simultaneously recorded
             video of the child's behaviors. Automated computer vision
             algorithms coded emotions and social referencing to quantify
             autism risk behaviors. We validated our automatic computer
             coding by comparing the computer-generated analysis of
             facial expression and social referencing to human coding of
             these behaviors. We report our method and propose the
             development and testing of measures of young children's
             behaviors as the first step toward development of a novel,
             fully integrated, low-cost, scalable screening tool for
             autism and other neurodevelopmental disorders of early
             childhood.},
   Doi = {10.4108/eai.14-10-2015.2261939},
   Key = {fds330346}
}

@article{fds330345,
   Author = {Qiu, Q and Chang, Z and Draelos, M and Chen, J and Bronstein, A and Sapiro,
             G},
   Title = {Low-cost Gaze and Pulse Analysis using RealSense},
   Journal = {Proceedings of the 5th EAI International Conference on
             Wireless Mobile Communication and Healthcare},
   Pages = {276-279},
   Publisher = {ICST (Institute for Computer Sciences, Social-Informatics
             and Telecommunications Engineering)},
   Year = {2015},
   ISBN = {1631900889},
   url = {http://dx.doi.org/10.4108/eai.14-10-2015.2261657},
Abstract = {Intel's newly announced low-cost and high-precision
             RealSense 3D (RGBD) camera is becoming ubiquitous in laptops
             and mobile devices starting this year, opening the door for
             new applications in the mobile health arena. In this paper,
             we demonstrate how the Intel RealSense 3D camera can be used
             for low-cost gaze tracking and passive pulse rate
             estimation. We develop a novel 3D gaze and fixation tracker
             based on the eye surface geometry, as well as an
             illumination-invariant pulse rate estimation method using
             near-infrared images captured with RealSense. We achieve a
             mean error of 1 cm at 20-30 cm for the gaze tracker and 2.26
             bpm (beats per minute) for pulse estimation, which is
             adequate in many medical applications, demonstrating the
             great potential of novel consumer-grade RGBD technology in
             mobile health.},
   Doi = {10.4108/eai.14-10-2015.2261657},
   Key = {fds330345}
}

@article{fds322214,
   Author = {Draelos, M and Qiu, Q and Bronstein, A and Sapiro,
             G},
Title = {Intel RealSense = real low cost gaze},
Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Volume = {2015-December},
   Pages = {2520-2524},
   Publisher = {IEEE},
   Year = {2015},
   ISBN = {9781479983391},
   url = {http://dx.doi.org/10.1109/ICIP.2015.7351256},
   Abstract = {Intel's newly-announced low-cost RealSense 3D camera claims
             significantly better precision than other currently
             available low-cost platforms and is expected to become
             ubiquitous in laptops and mobile devices starting this year.
             In this paper, we demonstrate for the first time that the
             RealSense camera can be easily converted into a real
             low-cost gaze tracker. Gaze has become increasingly relevant
             as an input for human-computer interaction due to its
             association with attention. It is also critical in clinical
             mental health diagnosis. We present a novel 3D gaze and
             fixation tracker based on the eye surface geometry captured
             with the RealSense 3D camera. First, eye surface 3D point
             clouds are segmented to extract the pupil center and iris
             using registered infrared images. With non-ellipsoid eye
             surface and single fixation point assumptions, pupil centers
             and iris normal vectors are used to first estimate gaze (for
             each eye), and then a single fixation point for both eyes
             simultaneously using a RANSAC-based approach. With a simple
             learned bias field correction model, the fixation tracker
             demonstrates mean error of approximately 1 cm at 20-30 cm,
which is adequate for gaze and fixation
             tracking in human-computer interaction and mental health
             diagnosis applications.},
   Doi = {10.1109/ICIP.2015.7351256},
   Key = {fds322214}
}

@article{fds264705,
   Author = {Tepper, M and Sapiro, G},
   Title = {A biclustering framework for consensus problems},
   Journal = {SIAM Journal on Imaging Sciences},
   Volume = {7},
   Number = {4},
   Pages = {2488-2552},
   Publisher = {Society for Industrial & Applied Mathematics
             (SIAM)},
   Year = {2014},
   Month = {November},
   url = {http://dx.doi.org/10.1137/140967325},
   Abstract = {We consider grouping as a general characterization for
             problems such as clustering, community detection in
             networks, and multiple parametric model estimation. We are
             interested in merging solutions from different grouping
             algorithms, distilling all their good qualities into a
             consensus solution. In this paper, we propose a biclustering
             framework and perspective for reaching consensus in such
             grouping problems. In particular, this is the first time
             that the task of finding/fitting multiple parametric models
             to a dataset is formally posed as a consensus problem. We
             highlight the equivalence of these tasks and establish the
             connection with the computational Gestalt program, which
             seeks to provide a psychologically inspired detection theory
             for visual events. We also present a simple but powerful
             biclustering algorithm, specially tuned to the nature of the
             problem we address, though general enough to handle many
             different instances inscribed within our characterization.
The presentation is accompanied by diverse and extensive
             experimental results in clustering, community detection, and
             multiple parametric model estimation in image processing
             applications.},
   Doi = {10.1137/140967325},
   Key = {fds264705}
}
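
% Note: the consensus formulation above starts from a binary
% element-by-group matrix pooling the candidate groups produced by all
% base algorithms; biclusters of this matrix correspond to consensual
% groups. A minimal sketch of building that matrix (illustrative only;
% the paper's biclustering algorithm itself is not reproduced):

    import numpy as np

    def incidence_matrix(partitions):
        """Stack all groups from several clusterings into one binary
        matrix A with A[i, j] = 1 iff element i is in candidate group j."""
        columns = []
        for labels in partitions:
            labels = np.asarray(labels)
            for c in np.unique(labels):
                columns.append((labels == c).astype(int))
        return np.stack(columns, axis=1)

    # Three 6-element clusterings from different base algorithms.
    parts = [[0, 0, 0, 1, 1, 1],
             [0, 0, 1, 1, 2, 2],
             [0, 0, 0, 1, 1, 2]]
    print(incidence_matrix(parts).shape)  # (6, 8): 2 + 3 + 3 groups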

@article{fds264708,
   Author = {Yang, J and Yuan, X and Liao, X and Llull, P and Brady, DJ and Sapiro, G and Carin, L},
   Title = {Video compressive sensing using Gaussian mixture
             models.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {23},
   Number = {11},
   Pages = {4863-4878},
   Year = {2014},
   Month = {November},
   ISSN = {1057-7149},
   url = {http://dx.doi.org/10.1109/tip.2014.2344294},
   Abstract = {A Gaussian mixture model (GMM)-based algorithm is proposed
             for video reconstruction from temporally compressed video
             measurements. The GMM is used to model spatio-temporal video
             patches, and the reconstruction can be efficiently computed
             based on analytic expressions. The GMM-based inversion
             method benefits from online adaptive learning and parallel
             computation. We demonstrate the efficacy of the proposed
             inversion method with videos reconstructed from simulated
             compressive video measurements, and from a real compressive
             video camera. We also use the GMM as a tool to investigate
             adaptive video compressive sensing, i.e., adaptive rate of
             temporal compression.},
   Doi = {10.1109/tip.2014.2344294},
   Key = {fds264708}
}
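
% Note: the analytic reconstruction mentioned above follows from the
% fact that, under a GMM prior and linear measurements, each mixture
% component yields a closed-form (Wiener) posterior. A minimal sketch of
% that per-patch MMSE inversion (the notation and toy setup are
% assumptions, not the paper's implementation):

    import numpy as np
    from scipy.stats import multivariate_normal

    def gmm_inversion(y, H, sigma2, weights, means, covs):
        """MMSE estimate of x from y = H x + n, n ~ N(0, sigma2 I),
        under the GMM prior x ~ sum_k weights[k] N(means[k], covs[k])."""
        post_means, evid = [], []
        for w, mu, S in zip(weights, means, covs):
            Sy = H @ S @ H.T + sigma2 * np.eye(len(y))  # evidence cov.
            evid.append(w * multivariate_normal.pdf(y, H @ mu, Sy))
            G = S @ H.T @ np.linalg.inv(Sy)             # Wiener gain
            post_means.append(mu + G @ (y - H @ mu))
        w = np.array(evid) / np.sum(evid)
        return sum(wk * mk for wk, mk in zip(w, post_means))

    # Toy example: recover a 4-D patch from 2 linear measurements.
    rng = np.random.default_rng(0)
    H = rng.standard_normal((2, 4))
    x_hat = gmm_inversion(H @ np.ones(4), H, 0.01, [0.5, 0.5],
                          [np.zeros(4), np.ones(4)],
                          [np.eye(4), 0.5 * np.eye(4)])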

@article{fds264709,
   Author = {Duarte-Carvajalino, JM and Lenglet, C and Xu, J and Yacoub, E and Ugurbil, K and Moeller, S and Carin, L and Sapiro,
             G},
   Title = {Estimation of the CSA-ODF using Bayesian compressed sensing
             of multi-shell HARDI.},
   Journal = {Magnetic resonance in medicine},
   Volume = {72},
   Number = {5},
   Pages = {1471-1485},
   Year = {2014},
   Month = {November},
   ISSN = {0740-3194},
   url = {http://dx.doi.org/10.1002/mrm.25046},
   Abstract = {Purpose: Diffusion MRI provides important information
             about the brain white matter structures and has opened new
             avenues for neuroscience and translational research.
             However, the acquisition time needed for advanced
             applications can still be a challenge in clinical settings.
             There is consequently a need to accelerate diffusion MRI
             acquisitions. Methods: A multi-task Bayesian compressive
             sensing (MT-BCS) framework is proposed to directly estimate
             the constant solid angle orientation distribution function
             (CSA-ODF) from under-sampled (i.e., accelerated image
             acquisition) multi-shell high angular resolution diffusion
             imaging (HARDI) datasets, and accurately recover HARDI data
             at higher resolution in q-space. The proposed MT-BCS
             approach exploits the spatial redundancy of the data by
             modeling the statistical relationships within groups
             (clusters) of diffusion signal. This framework also
             provides uncertainty estimates of the computed CSA-ODF and
             diffusion signal, directly computed from the compressive
             measurements. Experiments validating the proposed framework
             are performed using realistic multi-shell synthetic images
             and in vivo multi-shell HARDI datasets. Results: Results
             indicate a practical reduction in the number of required
             diffusion volumes (q-space samples) by at least a factor of
             four to estimate the CSA-ODF from multi-shell data.
             Conclusion: This work presents, for the first time, a
             multi-task Bayesian compressive sensing approach to
             simultaneously estimate the full posterior of the CSA-ODF
             and diffusion-weighted volumes from multi-shell HARDI
             acquisitions. It demonstrates improvement of the quality of
             acquired datasets by means of CS de-noising, and accurate
             estimation of the CSA-ODF, as well as enables a reduction
             in the acquisition time by a factor of two to four,
             especially when "staggered" q-space sampling schemes are
             used. The proposed MT-BCS framework can naturally be
             combined with parallel MR imaging to further accelerate
             HARDI acquisitions.},
   Doi = {10.1002/mrm.25046},
   Key = {fds264709}
}

@article{fds264707,
   Author = {Yuan, X and Llull, P and Liao, X and Yang, J and Brady, DJ and Sapiro, G and Carin, L},
   Title = {Low-cost compressive sensing for color video and
             depth},
   Journal = {Proceedings of the IEEE Computer Society Conference on
             Computer Vision and Pattern Recognition},
   Pages = {3318-3325},
   Publisher = {IEEE},
   Year = {2014},
   Month = {September},
   ISBN = {9781479951178},
   ISSN = {1063-6919},
   url = {http://dx.doi.org/10.1109/CVPR.2014.424},
   Abstract = {A simple and inexpensive (low-power and low-bandwidth)
             modification is made to a conventional off-the-shelf color
             video camera, from which we recover multiple color frames
             for each of the original measured frames, and each of the
             recovered frames can be focused at a different depth. The
             recovery of multiple frames for each measured frame is made
             possible via high-speed coding, manifested via translation
             of a single coded aperture; this translation is achieved
             inexpensively by mounting the binary code on a piezoelectric
             device. To simultaneously recover depth information, a
             liquid lens is modulated at high speed, via a variable
             voltage. Consequently, during the aforementioned coding
             process, the liquid lens allows the camera to sweep the
             focus through multiple depths. In addition to designing and
             implementing the camera, fast recovery is achieved by an
             anytime algorithm exploiting the group-sparsity of
             wavelet/DCT coefficients.},
   Doi = {10.1109/CVPR.2014.424},
   Key = {fds264707}
}
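
% Note: the measurement process above integrates many high-speed frames,
% each modulated by a translated binary code, into one captured frame. A
% minimal sketch of that forward model (np.roll emulates the
% piezo-driven mask translation; the reconstruction algorithm is not
% shown):

    import numpy as np

    def coded_exposure(frames, mask):
        """Compress T high-speed frames into one coded measurement.

        frames: (T, H, W) video block; mask: (H, W) binary code that is
        translated one pixel per frame during the exposure.
        """
        T = frames.shape[0]
        return sum(frames[t] * np.roll(mask, t, axis=1) for t in range(T))

    rng = np.random.default_rng(0)
    video = rng.random((8, 64, 64))              # 8 frames per shot
    mask = (rng.random((64, 64)) > 0.5).astype(float)
    y = coded_exposure(video, mask)              # one (64, 64) measurement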

@article{fds264710,
   Author = {Kim, J and Lenglet, C and Duchin, Y and Sapiro, G and Harel,
             N},
   Title = {Semiautomatic segmentation of brain subcortical structures
             from high-field MRI.},
   Journal = {IEEE journal of biomedical and health informatics},
   Volume = {18},
   Number = {5},
   Pages = {1678-1695},
   Year = {2014},
   Month = {September},
   ISSN = {2168-2194},
   url = {http://dx.doi.org/10.1109/jbhi.2013.2292858},
   Abstract = {Volumetric segmentation of subcortical structures, such as
             the basal ganglia and thalamus, is necessary for noninvasive
             diagnosis and neurosurgery planning. This is a challenging
             problem due in part to limited boundary information between
             structures, similar intensity profiles across the different
             structures, and low contrast data. This paper presents a
             semiautomatic segmentation system exploiting the superior
             image quality of ultrahigh field (7 T) MRI. The proposed
             approach utilizes the complementary edge information in the
             multiple structural MRI modalities. It combines two
             optimally selected modalities from susceptibility-weighted,
             T2-weighted, and diffusion MRI, and introduces a tailored
             new edge indicator function. In addition, we employ
             prior shape and configuration knowledge of the subcortical
             structures in order to guide the evolution of geometric
             active surfaces. Neighboring structures are segmented
             iteratively, constraining oversegmentation at their borders
             with a nonoverlapping penalty. Several experiments with data
             acquired on a 7 T MRI scanner demonstrate the feasibility
             and power of the approach for the segmentation of basal
             ganglia components critical for neurosurgery applications
             such as deep brain stimulation surgery.},
   Doi = {10.1109/jbhi.2013.2292858},
   Key = {fds264710}
}
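
% Note: geometric active surfaces are steered by an edge indicator
% function that approaches zero on boundaries; the system above builds a
% tailored indicator from two complementary modalities. A minimal sketch
% of one standard construction, g = 1/(1 + |grad I|^2), with a pointwise
% minimum across modalities (the combination rule here is an
% illustrative assumption, not the paper's exact function):

    import numpy as np
    from scipy.ndimage import gaussian_filter, sobel

    def edge_indicator(img, sigma=1.0):
        """g = 1 / (1 + |grad(G_sigma * I)|^2); small on strong edges."""
        smoothed = gaussian_filter(img.astype(float), sigma)
        grads = [sobel(smoothed, axis=a) for a in range(img.ndim)]
        return 1.0 / (1.0 + sum(g * g for g in grads))

    def combined_indicator(modality_a, modality_b):
        # Keep the strongest edge evidence found in either modality.
        return np.minimum(edge_indicator(modality_a),
                          edge_indicator(modality_b))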

@article{fds264697,
   Author = {Yoo, TS and Lowekamp, BC and Kuybeda, O and Narayan, K and Frank, GA and Bartesaghi, A and Borgnia, M and Subramaniam, S and Sapiro, G and Ackerman, MJ},
   Title = {Accelerating discovery in 3D microanalysis: Leveraging open
             source software and deskside high performance
             computing},
   Journal = {Microscopy and Microanalysis},
   Volume = {20},
   Number = {3},
   Pages = {774-775},
   Publisher = {Cambridge University Press (CUP)},
   Year = {2014},
   Month = {August},
   ISSN = {1431-9276},
   url = {http://dx.doi.org/10.1017/S1431927614005595},
   Doi = {10.1017/S1431927614005595},
   Key = {fds264697}
}

@article{fds264720,
   Author = {Prasad, G and Joshi, SH and Jahanshad, N and Villalon-Reina, J and Aganj, I and Lenglet, C and Sapiro, G and McMahon, KL and de Zubicaray,
             GI and Martin, NG and Wright, MJ and Toga, AW and Thompson,
             PM},
   Title = {Automatic clustering and population analysis of white matter
             tracts using maximum density paths.},
   Journal = {NeuroImage},
   Volume = {97},
   Pages = {284-295},
   Year = {2014},
   Month = {August},
   ISSN = {1053-8119},
   url = {http://dx.doi.org/10.1016/j.neuroimage.2014.04.033},
   Abstract = {We introduce a framework for population analysis of white
             matter tracts based on diffusion-weighted images of the
             brain. The framework enables extraction of fibers from high
             angular resolution diffusion images (HARDI); clustering of
             the fibers based partly on prior knowledge from an atlas;
             representation of the fiber bundles compactly using a path
             following points of highest density (maximum density path;
             MDP); and registration of these paths together using
             geodesic curve matching to find local correspondences across
             a population. We demonstrate our method on 4-Tesla HARDI
             scans from 565 young adults to compute localized statistics
             across 50 white matter tracts based on fractional anisotropy
             (FA). Experimental results show increased sensitivity in the
             determination of genetic influences on principal fiber
             tracts compared to the tract-based spatial statistics (TBSS)
             method. Our results show that the MDP representation reveals
             important parts of the white matter structure and
             considerably reduces the dimensionality over comparable
             fiber matching approaches.},
   Doi = {10.1016/j.neuroimage.2014.04.033},
   Key = {fds264720}
}

@article{fds264723,
   Author = {Harrison, BD and Hashemi, J and Bibi, M and Pulver, R and Bavli, D and Nahmias, Y and Wellington, M and Sapiro, G and Berman,
             J},
   Title = {A tetraploid intermediate precedes aneuploid formation in
             yeasts exposed to fluconazole.},
   Journal = {PLoS biology},
   Volume = {12},
   Number = {3},
   Pages = {e1001815},
   Year = {2014},
   Month = {March},
   ISSN = {1544-9173},
   url = {http://dx.doi.org/10.1371/journal.pbio.1001815},
   Abstract = {Candida albicans, the most prevalent human fungal pathogen,
             is generally diploid. However, 50% of isolates that are
             resistant to fluconazole (FLC), the most widely used
             antifungal, are aneuploid and some aneuploidies can confer
             FLC resistance. To ask if FLC exposure causes or only
             selects for aneuploidy, we analyzed diploid strains during
             exposure to FLC using flow cytometry and epifluorescence
             microscopy. FLC exposure caused a consistent deviation from
             normal cell cycle regulation: nuclear and spindle cycles
             initiated prior to bud emergence, leading to "trimeras,"
             three connected cells composed of a mother, daughter, and
             granddaughter bud. Initially binucleate, trimeras underwent
             coordinated nuclear division yielding four daughter nuclei,
             two of which underwent mitotic collapse to form a tetraploid
             cell with extra spindle components. In subsequent cell
             cycles, the abnormal number of spindles resulted in unequal
             DNA segregation and viable aneuploid progeny. The process of
             aneuploid formation in C. albicans is highly reminiscent of
             early stages in human tumorigenesis in that aneuploidy
             arises through a tetraploid intermediate and subsequent
             unequal DNA segregation driven by multiple spindles coupled
             with a subsequent selective advantage conferred by at least
             some aneuploidies during growth under stress. Finally,
             trimera formation was detected in response to other azole
             antifungals, in related Candida species, and in an in vivo
             model for Candida infection, suggesting that aneuploids
             arise due to azole treatment of several pathogenic yeasts
             and that this can occur during the infection
             process.},
   Doi = {10.1371/journal.pbio.1001815},
   Key = {fds264723}
}

@article{fds264699,
   Author = {Tepper, M and Sapiro, G},
   Title = {Intersecting 2D lines: A simple method for detecting
             vanishing points},
   Journal = {2014 IEEE International Conference on Image Processing, ICIP
             2014},
   Pages = {1056-1060},
   Publisher = {IEEE},
   Year = {2014},
   Month = {January},
   ISBN = {9781479957514},
   url = {http://dx.doi.org/10.1109/ICIP.2014.7025210},
   Abstract = {We present a simple and powerful technique for testing with
             a prescribed precision whether a set of 2D lines meet at a
             given point. The method is based on a probabilistic
             framework and has a fundamental geometric interpretation. We
             use this technique for detecting vanishing points in images.
             We developed a very simple algorithm that yields
             state-of-the-art results at a much lower computational cost
             than its competitors. The presentation of the proposed
             formulation is complemented with numerous
             examples.},
   Doi = {10.1109/ICIP.2014.7025210},
   Key = {fds264699}
}
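
% Note: the core test above asks whether a set of 2D lines meet at a
% given point up to a prescribed precision. A minimal sketch of that
% test in isolation (the paper's probabilistic validation and candidate
% search are not reproduced):

    import numpy as np

    def meeting_fraction(lines, point, precision):
        """Fraction of lines a x + b y + c = 0 passing within
        `precision` of `point`; lines is an (n, 3) array of (a, b, c)."""
        a, b, c = lines[:, 0], lines[:, 1], lines[:, 2]
        x, y = point
        dist = np.abs(a * x + b * y + c) / np.hypot(a, b)
        return np.mean(dist <= precision)

    # Three lines through (1, 2) plus one outlier.
    lines = np.array([[1.0, 0.0, -1.0],   # x = 1
                      [0.0, 1.0, -2.0],   # y = 2
                      [1.0, -1.0, 1.0],   # y = x + 1
                      [1.0, 1.0, 5.0]])   # outlier
    print(meeting_fraction(lines, (1.0, 2.0), 1e-6))  # 0.75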

@article{fds264700,
   Author = {Qiu, Q and Sapiro, G},
   Title = {Learning Transformations},
   Journal = {2014 IEEE International Conference on Image Processing, ICIP
             2014},
   Pages = {4008-4012},
   Publisher = {IEEE},
   Year = {2014},
   Month = {January},
   ISBN = {9781479957514},
   url = {http://dx.doi.org/10.1109/ICIP.2014.7025814},
   Abstract = {A low-rank transformation learning framework for subspace
             clustering and classification is here proposed. Many
             high-dimensional data, such as face images and motion
             sequences, approximately lie in a union of low-dimensional
             subspaces. The corresponding subspace clustering problem has
             been extensively studied in the literature, partitioning
             such high-dimensional data into clusters corresponding to
             their underlying low-dimensional subspaces. However,
             low-dimensional intrinsic structures are often violated for
             real-world observations, as they can be corrupted by errors
             or deviate from ideal models. We propose to address this by
             learning a linear transformation on subspaces using matrix
             rank, via its convex surrogate nuclear norm, as the
             optimization criterion. The learned linear transformation
             restores a low-rank structure for data from the same
             subspace, and, at the same time, forces a high-rank
             structure for data from different subspaces. In this way, we
             reduce variations within the subspaces, and increase
             separation between the subspaces for improved subspace
             clustering and classification.},
   Doi = {10.1109/ICIP.2014.7025814},
   Key = {fds264700}
}
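
% Note: the criterion above can be summarized as making each transformed
% subspace low-rank while keeping their union high-rank. A minimal
% sketch that simply evaluates such an objective via the nuclear norm
% (the paper's optimization of T is not reproduced; data are synthetic):

    import numpy as np

    def nuclear_norm(M):
        return np.linalg.svd(M, compute_uv=False).sum()

    def objective(T, clusters):
        """sum_c ||T X_c||_* - ||T [X_1 ... X_C]||_*; lower is better
        (low rank within each subspace, high rank across subspaces)."""
        X_all = np.hstack(clusters)
        return (sum(nuclear_norm(T @ Xc) for Xc in clusters)
                - nuclear_norm(T @ X_all))

    rng = np.random.default_rng(0)
    # Two noisy rank-1 subspaces embedded in R^5.
    mk = lambda: (np.outer(rng.standard_normal(5),
                           rng.standard_normal(20))
                  + 0.1 * rng.standard_normal((5, 20)))
    print(objective(np.eye(5), [mk(), mk()]))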

@article{fds264701,
   Author = {Qiu, Q and Sapiro, G},
   Title = {Learning compressed image classification
             features},
   Journal = {2014 IEEE International Conference on Image Processing, ICIP
             2014},
   Pages = {5761-5765},
   Publisher = {IEEE},
   Year = {2014},
   Month = {January},
   ISBN = {9781479957514},
   url = {http://dx.doi.org/10.1109/ICIP.2014.7026165},
   Abstract = {A transformation-based dimension-reduction, and thereby
             compressive, technique for classification is here proposed.
             High-dimensional data often approximately lie in a union of
             low-dimensional subspaces. We propose to perform dimension
             reduction by learning a 'fat' linear transformation matrix
             on subspaces using nuclear norm as the optimization
             criterion. The learned transformation enables dimension
             reduction, and, at the same time, restores a low-rank
             structure for data from the same class and maximizes the
             separation between different classes, thereby improving
             classification via learned low-dimensional features.
             Theoretical and experimental results support the proposed
             framework, which can be interpreted as learning compressed
             sensing matrices for classification.},
   Doi = {10.1109/ICIP.2014.7026165},
   Key = {fds264701}
}

@article{fds264711,
   Author = {Tepper, M and Sapiro, G},
   Title = {All for one, one for all: Consensus community detection in
             networks},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {1075-1079},
   Publisher = {IEEE},
   Year = {2014},
   Month = {January},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2014.6853762},
   Abstract = {Given a universe of distinct, low-level communities of a
             network, we aim at identifying the 'meaningful' and
             consistent communities in this universe. We address this as
             the process of obtaining consensual community detections and
             formalize it as a bi-clustering problem. While most
             consensus algorithms only take into account pairwise
             relations and end up analyzing a huge matrix, our proposed
             characterization of the consensus problem (1) does not drop
             useful information, and (2) analyzes a much smaller matrix,
             rendering the problem tractable for large networks. We also
             propose a new parameterless bi-clustering algorithm, fit
             for the type of matrices we analyze. The approach has proven
             successful in a very diverse set of experiments, ranging
             from unifying the results of multiple community detection
             algorithms to finding common communities from multi-modal or
             noisy networks.},
   Doi = {10.1109/ICASSP.2014.6853762},
   Key = {fds264711}
}

@article{fds264712,
   Author = {Carpenter, K and Sprechmann, P and Fiori, M and Calderbank, R and Egger,
             H and Sapiro, G},
   Title = {Questionnaire simplification for fast risk analysis of
             children's mental health},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {6009-6013},
   Publisher = {IEEE},
   Year = {2014},
   Month = {January},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2014.6854757},
   Abstract = {Early detection and treatment of psychiatric disorders in
             children has shown significant impact on their subsequent
             development and quality of life. The assessment of
             psychopathology in childhood is commonly carried out by
             performing long comprehensive interviews such as the widely
             used Preschool Age Psychiatric Assessment (PAPA).
             Unfortunately, the time required to complete a full
             interview is too long to apply it at the scale of the
             actual population at risk, and most of the population goes
             undiagnosed or is diagnosed significantly later than
             desired. In this work, we aim to learn, from a unique and
             very rich set of previously collected PAPA examples, the
             inter-correlations between different questions in order to
             provide a reliable risk analysis in the form of a much
             shorter interview. This helps to put such important risk
             analysis in the hands of regular practitioners, including
             teachers and family doctors. We use for this purpose the
             alternating decision trees algorithm, which combines
             decision trees with boosting to produce small and
             interpretable decision rules. Rather than a binary
             prediction, the algorithm provides a measure of confidence
             in the classification outcome. This is highly desirable
             from a clinical perspective, where it is preferable to
             abstain from a decision on low-confidence cases and
             recommend further screening. In order to prevent
             over-fitting, we propose to use network inference analysis
             to predefine a set of candidate questions with consistently
             high correlation with the diagnosis. We report encouraging
             results with high levels of prediction using two
             independently collected datasets. The length and accuracy
             of the developed method suggest that it could be a valuable
             tool for preliminary evaluation in everyday care.},
   Doi = {10.1109/ICASSP.2014.6854757},
   Key = {fds264712}
}
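
% Note: the alternating decision trees used above output a real-valued
% margin rather than a hard label, which is what enables abstaining on
% low-confidence cases. A minimal sketch of the same idea using
% scikit-learn's boosted stumps (an analogous model on synthetic data,
% not the paper's ADTree implementation or its clinical data):

    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.ensemble import AdaBoostClassifier

    X, y = make_classification(n_samples=400, n_features=20,
                               random_state=0)
    # The default base learner is a depth-1 tree (decision stump).
    model = AdaBoostClassifier(n_estimators=50, random_state=0).fit(X, y)

    margin = model.decision_function(X)     # signed confidence score
    predicted = (margin > 0).astype(int)
    abstain = np.abs(margin) < 0.2          # defer to further screening
    print(f"abstained on {abstain.mean():.0%} of cases")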

@article{fds264714,
   Author = {Sprechmann, P and Bronstein, AM and Sapiro, G},
   Title = {Supervised non-euclidean sparse NMF via bilevel optimization
             with applications to speech enhancement},
   Journal = {2014 4th Joint Workshop on Hands-Free Speech Communication
             and Microphone Arrays, HSCMA 2014},
   Pages = {11-15},
   Publisher = {IEEE},
   Year = {2014},
   Month = {January},
   url = {http://dx.doi.org/10.1109/HSCMA.2014.6843241},
   Abstract = {Traditionally, NMF algorithms consist of two separate
             stages: a training stage, in which a generative model is
             learned; and a testing stage in which the pre-learned model
             is used in a high level task such as enhancement,
             separation, or classification. As an alternative, we propose
             a task-supervised NMF method for the adaptation of the basis
             spectra learned in the first stage to enhance the
             performance on the specific task used in the second stage.
             We cast this problem as a bilevel optimization program that
             can be efficiently solved via stochastic gradient descent.
             The proposed approach is general enough to handle sparsity
             priors on the activations, and allows non-Euclidean data
             terms such as β-divergences. The framework is evaluated on
             single-channel speech enhancement tasks.},
   Doi = {10.1109/HSCMA.2014.6843241},
   Key = {fds264714}
}
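
% Note: the first (generative) stage above can be illustrated with plain
% NMF separation: decompose the mixture spectrogram on concatenated
% speech and noise dictionaries and build a Wiener-like mask. A minimal
% multiplicative-update sketch under a Euclidean cost (the bilevel,
% task-supervised training that is the paper's contribution is not
% reproduced; the dictionaries are assumed pre-trained):

    import numpy as np

    def nmf_activations(V, W, n_iter=200, eps=1e-9):
        """Solve V ~ W H for H >= 0 with a fixed dictionary W."""
        rng = np.random.default_rng(0)
        H = rng.random((W.shape[1], V.shape[1]))
        for _ in range(n_iter):
            H *= (W.T @ V) / (W.T @ W @ H + eps)  # multiplicative update
        return H

    def enhance(V_mix, W_speech, W_noise):
        """Wiener-like masking of a magnitude spectrogram V_mix."""
        W = np.hstack([W_speech, W_noise])
        H = nmf_activations(V_mix, W)
        k = W_speech.shape[1]
        V_s, V_n = W_speech @ H[:k], W_noise @ H[k:]
        return V_mix * V_s / (V_s + V_n + 1e-9)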

@article{fds304067,
   Author = {Fiori, M and Musé, P and Sapiro, G},
   Title = {A complete system for candidate polyps detection in virtual
             colonoscopy},
   Journal = {International Journal of Pattern Recognition and Artificial
             Intelligence},
   Volume = {28},
   Number = {7},
   Pages = {1460014-1460014},
   Publisher = {World Scientific Pub Co Pte Lt},
   Year = {2014},
   Month = {January},
   url = {http://arxiv.org/abs/1209.6525v1},
   Abstract = {We present a computer-aided detection pipeline for polyp
             detection in computed tomographic colonography. The first
             stage of the pipeline consists of a simple colon
             segmentation technique that enhances polyps, which is
             followed by an adaptive-scale candidate polyp delineation,
             in order to capture the appropriate polyp size. In the last
             step, candidates are classified based on new texture and
             geometric features that consider both the information in the
             candidate polyp location and its immediate surrounding area.
             The system is tested with ground truth data, including flat
             and small polyps, which are hard to detect even with
             optical colonoscopy. We achieve 100% sensitivity for polyps
             larger than 6 mm in size with just 0.9 false positives per
             case, and 93% sensitivity with 2.8 false positives per case
             for polyps larger than 3 mm in size.},
   Doi = {10.1142/S0218001414600143},
   Key = {fds304067}
}

@article{fds345825,
   Author = {Qiu, Q and Sapiro, G},
   Title = {Learning transformations for classification
             forests},
   Journal = {2nd International Conference on Learning Representations,
             ICLR 2014 - Conference Track Proceedings},
   Year = {2014},
   Month = {January},
   Abstract = {This work introduces a transformation-based learner model for
             classification forests. The weak learner at each split node
             plays a crucial role in a classification tree. We propose to
             optimize the splitting objective by learning a linear
             transformation on subspaces using nuclear norm as the
             optimization criterion. The learned linear transformation
             restores a low-rank structure for data from the same class,
             and, at the same time, maximizes the separation between
             different classes, thereby improving the performance of the
             split function. Theoretical and experimental results support
             the proposed framework.},
   Key = {fds345825}
}

@article{fds345826,
   Author = {Masci, J and Bronstein, AM and Bronstein, MM and Sprechmann, P and Sapiro, G},
   Title = {Sparse similarity-preserving hashing},
   Journal = {2nd International Conference on Learning Representations,
             ICLR 2014 - Conference Track Proceedings},
   Year = {2014},
   Month = {January},
   Abstract = {In recent years, a lot of attention has been
             devoted to efficient nearest
             neighbor search by means of similarity-preserving hashing.
             One of the plights of existing hashing techniques is the
             intrinsic trade-off between performance and computational
             complexity: while longer hash codes allow for lower false
             positive rates, it is very difficult to increase the
             embedding dimensionality without incurring very high false
             negative rates or prohibitive computational costs. In this
             paper, we propose a way to overcome this limitation by
             enforcing the hash codes to be sparse. Sparse
             high-dimensional codes enjoy the low false positive rates
             typical of long hashes, while keeping the false negative
             rates similar to those of a shorter dense hashing scheme
             with an equal number of degrees of freedom. We use a
             tailored feed-forward neural network for the hashing
             function. Extensive experimental evaluation involving visual
             and multimodal data shows the benefits of the proposed
             method.},
   Key = {fds345826}
}
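
% Note: the long-but-sparse codes described above can be sketched with a
% crude stand-in for the learned network: a random projection followed
% by keeping only the top-k responses per sample (illustrative only; the
% paper learns the hashing function with a tailored feed-forward
% network):

    import numpy as np

    def sparse_hash(X, P, k):
        """Binary codes with exactly k active bits per sample.
        X: (n, d) data; P: (d, m) random projection; k: sparsity."""
        Z = X @ P
        codes = np.zeros_like(Z, dtype=np.uint8)
        top = np.argpartition(-Z, k, axis=1)[:, :k]  # k largest responses
        np.put_along_axis(codes, top, 1, axis=1)
        return codes

    rng = np.random.default_rng(0)
    X = rng.standard_normal((100, 32))
    P = rng.standard_normal((32, 256))           # long, sparse codes
    C = sparse_hash(X, P, k=8)
    hamming = (C[:1] != C).sum(axis=1)           # distances to sample 0
    print(C.sum(axis=1)[:3], hamming[:3])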

@article{fds322691,
   Author = {Dennis, EL and Zhan, L and Jahanshad, N and Mueller, BA and Jin, Y and Lenglet, C and Yacoub, E and Sapiro, G and Ugurbil, K and Harel, N and Toga, A and Lim, KO and Thompson, PM},
   Title = {Rich club analysis of structural brain connectivity at 7
             tesla versus 3 tesla},
   Journal = {Mathematics and Visualization},
   Pages = {209-218},
   Publisher = {Springer International Publishing},
   Year = {2014},
   Month = {January},
   ISBN = {9783319024745},
   url = {http://dx.doi.org/10.1007/978-3-319-02475-2_19},
   Abstract = {The ‘rich club’ is a relatively new concept in brain
             connectivity analysis, which identifies a core of densely
             interconnected high-degree nodes. Establishing normative
             measures for rich club organization is vital, as is
             understanding how scanning parameters affect it. We compared
             the rich club organization in 23 subjects scanned at both 7
             and 3 T, with 128-gradient high angular resolution diffusion
             imaging (HARDI). The rich club coefficient (RCC) did not
             differ significantly between low and high field scans, but
             the field strength did affect which nodes were included in
             the rich club. We also examined 3 subjects with
             Alzheimer’s disease and 3 healthy elderly controls to see
             how field strength affected the statistical comparison. RCC
             did not differ with field strength, but again, which nodes
             differed between groups did. These results illustrate how
             one key parameter, scanner field strength, impacts rich club
             organization – a promising concept in brain connectomics
             research.},
   Doi = {10.1007/978-3-319-02475-2_19},
   Key = {fds322691}
}
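
% Note: the rich-club coefficient used above is available directly in
% NetworkX. A minimal sketch on a synthetic hub-heavy graph (a generic
% illustration, not the study's connectome pipeline):

    import networkx as nx

    G = nx.barabasi_albert_graph(200, 4, seed=0)
    # rcc[k]: density among nodes of degree > k, normalized against
    # degree-preserving randomized networks.
    rcc = nx.rich_club_coefficient(G, normalized=True)
    for k in sorted(rcc)[:5]:
        print(k, round(rcc[k], 3))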

@article{fds350116,
   Author = {Fedkiw, R and Morel, JM and Sapiro, G and Shu, CW and Yin,
             W},
   Title = {The work of Stanley Osher},
   Journal = {Proceedings of the International Congress of
             Mathematicians, ICM 2014},
   Volume = {1},
   Pages = {90-113},
   Year = {2014},
   Month = {January},
   ISBN = {9788961058049},
   Abstract = {In this paper we briefly present some of Stanley Osher's
             contributions in the areas of high resolution shock
             capturing methods, level set methods, partial differential
             equation (PDE) based methods in computer vision and image
             processing, and optimization. His numerical analysis
             contributions, including the Engquist-Osher scheme, total
             variation diminishing (TVD) schemes, entropy conditions,
             essentially non-oscillatory (ENO) and weighted ENO (WENO)
             schemes and numerical schemes for Hamilton-Jacobi type
             equations have revolutionized the field. His level set
             contributions include new level set calculus, novel
             numerical techniques, fluids and materials modeling,
             variational approaches, high codimension motion analysis,
             geometric optics, and the computation of discontinuous
             solutions to Hamilton-Jacobi equations. As we will further
             detail in this paper, the level set method, together with
             his total variation contributions, have been extremely
             influential in computer vision, image processing, and
             computer graphics. On top of that, such new methods have
             motivated some of the most fundamental studies in the theory
             of PDEs in recent years, completing the picture of applied
             mathematics inspiring pure mathematics. On optimization, he
             introduced Bregman algorithms and applied them to problems
             in a variety of contexts such as image processing,
             compressive sensing, signal processing, and machine
             learning. Finally, we will comment on Osher's
             entrepreneurship and how he brought his mathematics to
             industry.},
   Key = {fds350116}
}

@article{fds264715,
   Author = {Hashemi, J and Tepper, M and Vallin Spina and T and Esler, A and Morellas,
             V and Papanikolopoulos, N and Egger, H and Dawson, G and Sapiro,
             G},
   Title = {Computer vision tools for low-cost and noninvasive
             measurement of autism-related behaviors in
             infants.},
   Journal = {Autism Res Treat},
   Volume = {2014},
   Pages = {935686},
   Year = {2014},
   ISSN = {2090-1925},
   url = {http://hdl.handle.net/10161/9547},
   Abstract = {The early detection of developmental disorders is key to
             child outcome, allowing interventions to be initiated which
             promote development and improve prognosis. Research on
             autism spectrum disorder (ASD) suggests that behavioral
             signs can be observed late in the first year of life. Many
             of these studies involve extensive frame-by-frame video
             observation and analysis of a child's natural behavior.
             Although nonintrusive, these methods are extremely
             time-intensive and require a high level of observer
             training; thus, they are burdensome for clinical and large
             population research purposes. This work is a first milestone
             in a long-term project on non-invasive early observation of
             children in order to aid in risk detection and research of
             neurodevelopmental disorders. We focus on providing low-cost
             computer vision tools to measure and identify ASD behavioral
             signs based on components of the Autism Observation Scale
             for Infants (AOSI). In particular, we develop algorithms to
             measure responses to general ASD risk assessment tasks and
             activities outlined by the AOSI which assess visual
             attention by tracking facial features. We show results,
             including comparisons with expert and nonexpert clinicians,
             which demonstrate that the proposed computer vision tools
             can capture critical behavioral observations and potentially
             augment the clinician's behavioral observations obtained
             from real in-clinic assessments.},
   Doi = {10.1155/2014/935686},
   Key = {fds264715}
}

@article{fds264725,
   Author = {Tepper, M and Sapiro, G},
   Title = {Ants crawling to discover the community structure in
             networks},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {8259 LNCS},
   Number = {PART 2},
   Pages = {552-559},
   Publisher = {Springer Berlin Heidelberg},
   Year = {2013},
   Month = {December},
   ISSN = {0302-9743},
   url = {http://dx.doi.org/10.1007/978-3-642-41827-3_69},
   Abstract = {We cast the problem of discovering the community structure
             in networks as the composition of community candidates,
             obtained from several community detection base algorithms,
             into a coherent structure. In turn, this composition can be
             cast into a maximum-weight clique problem, and we propose an
             ant colony optimization algorithm to solve it. Our results
             show that the proposed method is able to discover better
             community structures, according to several evaluation
             criteria, than the ones obtained with the base algorithms.
             It also outperforms, both in quality and in speed, the
             recently introduced FG-Tiling algorithm.},
   Doi = {10.1007/978-3-642-41827-3_69},
   Key = {fds264725}
}

@article{fds264726,
   Author = {Walczak, N and Fasching, J and Toczyski, WD and Morellas, V and Sapiro,
             G and Papanikolopoulos, N},
   Title = {Locating occupants in preschool classrooms using a multiple
             RGB-D sensor system},
   Journal = {IEEE International Conference on Intelligent Robots and
             Systems},
   Pages = {2166-2172},
   Publisher = {IEEE},
   Year = {2013},
   Month = {December},
   ISSN = {2153-0858},
   url = {http://dx.doi.org/10.1109/IROS.2013.6696659},
   Abstract = {Presented are results demonstrating that, in developing a
             system with its first objective being the sustained
             detection of adults and young children as they move and
             interact in a normal preschool setting, the direct
             application of the straightforward RGB-D innovations
             presented here significantly outperforms even far more
             algorithmically advanced methods relying solely on images.
             The use of multiple RGB-D sensors by this project for
             depth-aware object localization economically resolves
             numerous issues regularly frustrating earlier vision-only
             detection and human surveillance methods, issues such as
             occlusions, illumination changes, unexpected postures,
             atypical morphologies, erratic or unanticipated motions,
             reflections, and misleading textures and colorations. This
             multiple RGB-D installation forms the front-end for a
             multi-step pipeline, the first portion of which seeks to
             isolate, in situ, 3D renderings of classroom occupants
             sufficient for a later analysis of their behaviors and
             interactions. Towards this end, a voxel-based approach to
             foreground/background separation and an effective adaptation
             of supervoxel clustering for 3D were developed, and 3D and
             image-only methods were tested and compared. The project's
             setting is highly challenging, but then so are its
             longer-term goals: the automated detection of early
             childhood precursors, ofttimes very subtle, to a number of
             increasingly common developmental disorders.},
   Doi = {10.1109/IROS.2013.6696659},
   Key = {fds264726}
}
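
% Note: the voxel-based foreground/background separation above can be
% reduced, for illustration, to a simple occupancy model: voxels that
% are persistently occupied across training frames form the background,
% and occupied voxels outside that set are flagged as foreground (a
% sketch under that assumption, not the project's implementation):

    import numpy as np

    def background_model(occupancy, min_fraction=0.8):
        """occupancy: (T, X, Y, Z) boolean voxel grids from the RGB-D
        sensors; background = occupied in >= min_fraction of frames."""
        return occupancy.mean(axis=0) >= min_fraction

    def foreground(frame, background):
        """Occupied voxels not explained by the static background."""
        return frame & ~background

    rng = np.random.default_rng(0)
    history = rng.random((50, 32, 32, 16)) > 0.7   # fake occupancy grids
    fg = foreground(history[-1], background_model(history))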

@article{fds264727,
   Author = {Fiori, M and Musé, P and Sapiro, G},
   Title = {Polyps flagging in virtual colonoscopy},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {8259 LNCS},
   Number = {PART 2},
   Pages = {181-189},
   Publisher = {Springer Berlin Heidelberg},
   Year = {2013},
   Month = {December},
   ISSN = {0302-9743},
   url = {http://dx.doi.org/10.1007/978-3-642-41827-3_23},
   Abstract = {Computed tomographic colonography, combined with
             computer-aided detection, is a promising emerging technique
             for colonic polyp analysis. We present a complete pipeline
             for polyp detection, starting with a simple colon
             segmentation technique that enhances polyps, followed by an
             adaptive-scale candidate polyp delineation and
             classification based on new texture and geometric features
             that consider both the information in the candidate polyp
             and its immediate surrounding area. The proposed system is
             tested with ground truth data, including challenging flat
             and small polyps. For polyps larger than 6 mm in size we
             achieve 100% sensitivity with just 0.9 false positives per
             case, and for polyps larger than 3 mm in size we achieve
             93% sensitivity with 2.8 false positives per case.},
   Doi = {10.1007/978-3-642-41827-3_23},
   Key = {fds264727}
}

@article{fds264728,
   Author = {Sprechmann, P and Bronstein, A and Bronstein, M and Sapiro,
             G},
   Title = {Learnable low rank sparse models for speech
             denoising},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {136-140},
   Publisher = {IEEE},
   Year = {2013},
   Month = {October},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2013.6637624},
   Abstract = {In this paper we present a framework for real time
             enhancement of speech signals. Our method leverages a new
             process-centric approach for sparse and parsimonious models,
             where the representation pursuit is obtained applying a
             deterministic function or process rather than solving an
             optimization problem. We first propose a rank-regularized
             robust version of non-negative matrix factorization (NMF)
             for modeling time-frequency representations of speech
             signals in which the spectral frames are decomposed as
             sparse linear combinations of atoms of a low-rank
             dictionary. Then, a parametric family of pursuit processes
             is derived from the iteration of the proximal descent method
             for solving this model. We present several experiments
             showing successful results and the potential of the proposed
             framework. Incorporating discriminative learning makes the
             proposed method significantly outperform exact NMF
             algorithms, with fixed latency and at a fraction of their
             computational complexity.},
   Doi = {10.1109/ICASSP.2013.6637624},
   Key = {fds264728}
}

@article{fds264729,
   Author = {Sprechmann, P and Bronstein, A and Morel, JM and Sapiro,
             G},
   Title = {Audio restoration from multiple copies},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {878-882},
   Publisher = {IEEE},
   Year = {2013},
   Month = {October},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2013.6637774},
   Abstract = {A method for removing impulse noise from audio signals by
             fusing multiple copies of the same recording is introduced
             in this paper. The proposed algorithm exploits the fact that
             while in general multiple copies of a given recording are
             available, all sharing the same master, most degradations in
             audio signals are record-dependent. Our method first seeks
             the optimal non-rigid alignment of the signals that is
             robust to the presence of sparse outliers with arbitrary
             magnitude. Unlike previous approaches, we simultaneously
             find the optimal alignment of the signals and the impulsive
             degradation. This is obtained via continuous dynamic time
             warping computed by solving an Eikonal equation. We propose
             to use our approach in the derivative domain, reconstructing
             the signal by solving an inverse problem that resembles the
             Poisson image editing technique. The proposed framework is
             illustrated and tested in the restoration of old gramophone
             recordings, showing promising results; however, it can be
             used in other applications where different copies of the
             signal of interest are available and the degradations are
             copy-dependent.},
   Doi = {10.1109/ICASSP.2013.6637774},
   Key = {fds264729}
}
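
% Note: the fusion idea above relies on impulses being copy-dependent,
% so they rarely coincide across copies of the same master. A pointwise
% median over aligned copies already illustrates the rejection effect (a
% minimal sketch that assumes the copies are pre-aligned; the paper's
% non-rigid Eikonal-based time warping and derivative-domain
% reconstruction are not reproduced):

    import numpy as np

    def fuse_copies(copies):
        """Median across aligned copies; clicks hit each copy at
        different samples, so the median rejects them."""
        return np.median(np.stack(copies), axis=0)

    rng = np.random.default_rng(0)
    t = np.linspace(0.0, 1.0, 8000)
    clean = np.sin(2 * np.pi * 440 * t)
    copies = []
    for _ in range(3):
        noisy = clean.copy()
        clicks = rng.choice(t.size, size=20, replace=False)
        noisy[clicks] += 5.0 * rng.standard_normal(20)  # copy-specific
        copies.append(noisy)
    print(np.max(np.abs(fuse_copies(copies) - clean)))  # near zero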

@article{fds264730,
   Author = {Cetingul, HE and Dumont, L and Nadar, MS and Thompson, PM and Sapiro, G and Lenglet, C},
   Title = {Importance sampling spherical harmonics to improve
             probabilistic tractography},
   Journal = {Proceedings - 2013 3rd International Workshop on Pattern
             Recognition in Neuroimaging, PRNI 2013},
   Pages = {46-49},
   Publisher = {IEEE},
   Year = {2013},
   Month = {October},
   url = {http://dx.doi.org/10.1109/PRNI.2013.21},
   Abstract = {We consider the problem of improving the accuracy and
             reliability of probabilistic white matter tractography
             methods by improving the built-in sampling scheme, which
             randomly draws, from a diffusion model such as the
             orientation distribution function (ODF), a direction of
             propagation. Existing methods employing inverse transform
             sampling require an ad hoc thresholding step to prevent the
             less likely directions from being sampled. We herein propose
             to perform importance sampling of spherical harmonics, which
             redistributes an input point set on the sphere to match the
             ODF using hierarchical sample warping. This produces a point
             set that is more concentrated around the modes, allowing the
             subsequent inverse transform sampling to generate
             orientations that are in better accordance with the local
             fiber configuration. Integrated into a Kalman filter-based
             framework, our approach is evaluated through experiments on
             synthetic, phantom, and real datasets.},
   Doi = {10.1109/PRNI.2013.21},
   Key = {fds264730}
}
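
% Note: the baseline improved upon above, inverse transform sampling of
% a propagation direction from a discretized ODF, can be sketched
% directly (illustrative only; the paper's importance-sampled warping of
% the spherical point set is not reproduced):

    import numpy as np

    def sample_directions(odf_values, directions, n, rng):
        """Draw n directions with probability proportional to the ODF
        amplitude at each of the m discretized orientations.
        odf_values: (m,) nonnegative; directions: (m, 3) unit vectors."""
        p = odf_values / odf_values.sum()
        idx = rng.choice(len(directions), size=n, p=p)
        return directions[idx]

    rng = np.random.default_rng(0)
    dirs = rng.standard_normal((100, 3))
    dirs /= np.linalg.norm(dirs, axis=1, keepdims=True)
    odf = np.exp(5.0 * dirs[:, 2] ** 2)     # toy ODF peaked along +/- z
    samples = sample_directions(odf, dirs, 1000, rng)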

@article{fds264737,
   Author = {Uğurbil, K and Xu, J and Auerbach, EJ and Moeller, S and Vu, AT and Duarte-Carvajalino, JM and Lenglet, C and Wu, X and Schmitter, S and Van
             de Moortele, PF and Strupp, J and Sapiro, G and De Martino and F and Wang,
             D and Harel, N and Garwood, M and Chen, L and Feinberg, DA and Smith, SM and Miller, KL and Sotiropoulos, SN and Jbabdi, S and Andersson, JLR and Behrens, TEJ and Glasser, MF and Van Essen and DC and Yacoub, E and WU-Minn
             HCP Consortium},
   Title = {Pushing spatial and temporal resolution for functional and
             diffusion MRI in the Human Connectome Project.},
   Journal = {NeuroImage},
   Volume = {80},
   Pages = {80-104},
   Year = {2013},
   Month = {October},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23702417},
   Abstract = {The Human Connectome Project (HCP) relies primarily on three
             complementary magnetic resonance (MR) methods. These are: 1)
             resting state functional MR imaging (rfMRI) which uses
             correlations in the temporal fluctuations in an fMRI time
             series to deduce 'functional connectivity'; 2) diffusion
             imaging (dMRI), which provides the input for tractography
             algorithms used for the reconstruction of the complex axonal
             fiber architecture; and 3) task based fMRI (tfMRI), which is
             employed to identify functional parcellation in the human
             brain in order to assist analyses of data obtained with the
             first two methods. We describe technical improvements and
             optimization of these methods as well as instrumental
             choices that impact speed of acquisition of fMRI and dMRI
             images at 3T, leading to whole brain coverage with 2 mm
             isotropic resolution in 0.7 s for fMRI, and 1.25 mm
             isotropic resolution dMRI data for tractography analysis
             with three-fold reduction in total dMRI data acquisition
             time. Ongoing technical developments and optimization for
             acquisition of similar data at 7 T magnetic field are also
             presented, targeting higher spatial resolution, enhanced
             specificity of functional imaging signals, mitigation of the
             inhomogeneous radio frequency (RF) fields, and reduced power
             deposition. Results demonstrate that overall, these
             approaches represent a significant advance in MR imaging of
             the human brain to investigate brain function and
             structure.},
   Doi = {10.1016/j.neuroimage.2013.05.012},
   Key = {fds264737}
}

@article{fds264738,
   Author = {Sotiropoulos, SN and Jbabdi, S and Xu, J and Andersson, JL and Moeller,
             S and Auerbach, EJ and Glasser, MF and Hernandez, M and Sapiro, G and Jenkinson, M and Feinberg, DA and Yacoub, E and Lenglet, C and Van
             Essen, DC and Ugurbil, K and Behrens, TEJ and WU-Minn HCP
             Consortium},
   Title = {Advances in diffusion MRI acquisition and processing in the
             Human Connectome Project.},
   Journal = {NeuroImage},
   Volume = {80},
   Pages = {125-143},
   Year = {2013},
   Month = {October},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23702418},
   Abstract = {The Human Connectome Project (HCP) is a collaborative 5-year
             effort to map human brain connections and their variability
             in healthy adults. A consortium of HCP investigators will
             study a population of 1200 healthy adults using multiple
             imaging modalities, along with extensive behavioral and
             genetic data. In this overview, we focus on diffusion MRI
             (dMRI) and the structural connectivity aspect of the
             project. We present recent advances in acquisition and
             processing that allow us to obtain very high-quality in-vivo
             MRI data, whilst enabling scanning of a very large number of
             subjects. These advances result from 2 years of intensive
             efforts in optimising many aspects of data acquisition and
             processing during the piloting phase of the project. The
             data quality and methods described here are representative
             of the datasets and processing pipelines that will be made
             freely available to the community at quarterly intervals,
             beginning in 2013.},
   Doi = {10.1016/j.neuroimage.2013.05.057},
   Key = {fds264738}
}

@article{fds264736,
   Author = {Chen, B and Polatkan, G and Sapiro, G and Blei, D and Dunson, D and Carin,
             L},
   Title = {Deep learning with hierarchical convolutional factor
             analysis.},
   Journal = {IEEE transactions on pattern analysis and machine
             intelligence},
   Volume = {35},
   Number = {8},
   Pages = {1887-1901},
   Year = {2013},
   Month = {August},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23787342},
   Abstract = {Unsupervised multilayered (“deep”) models are
             considered for imagery. The model is represented using a
             hierarchical convolutional factor-analysis construction,
             with sparse factor loadings and scores. The computation of
             layer-dependent model parameters is implemented within a
             Bayesian setting, employing a Gibbs sampler and variational
             Bayesian (VB) analysis that explicitly exploit the
             convolutional nature of the expansion. To address
             large-scale and streaming data, an online version of VB is
             also developed. The number of dictionary elements at each
             layer is inferred from the data, based on a beta-Bernoulli
             implementation of the Indian buffet process. Example results
             are presented for several image-processing applications,
             with comparisons to related models in the
             literature.},
   Doi = {10.1109/tpami.2013.19},
   Key = {fds264736}
}

@article{fds304058,
   Author = {Qiu, Q and Sapiro, G and Chen, C-H},
   Title = {Domain-invariant Face Recognition using Learned Low-rank
             Transformation},
   Volume = {abs/1308.0275},
   Year = {2013},
   Month = {August},
   url = {http://arxiv.org/abs/1308.0275v1},
   Abstract = {We present a low-rank transformation approach to compensate
             for face variations due to changes in visual domains, such
             as pose and illumination. The key idea is to learn
             discriminative linear transformations for face images using
             matrix rank as the optimization criterion. The learned linear
             transformations restore a shared low-rank structure for
             faces from the same subject, and, at the same time, force a
             high-rank structure for faces from different subjects. In
             this way, among the transformed faces, we reduce variations
             caused by domain changes within the classes, and increase
             separations between the classes for better face recognition
             across domains. Extensive experiments using public datasets
             are presented to demonstrate the effectiveness of our
             approach for face recognition across domains. The potential
             of the approach for feature extraction in generic object
             recognition and coded aperture design are discussed as
             well.},
   Key = {fds304058}
}

@article{fds304059,
   Author = {Qiu, Q and Sapiro, G},
   Title = {Learning Robust Subspace Clustering},
   Volume = {abs/1308.0273},
   Year = {2013},
   Month = {August},
   url = {http://arxiv.org/abs/1308.0273v1},
   Abstract = {We propose a low-rank transformation-learning framework to
             robustify subspace clustering. Many high-dimensional data,
             such as face images and motion sequences, lie in a union of
             low-dimensional subspaces. The subspace clustering problem
             has been extensively studied in the literature to partition
             such high-dimensional data into clusters corresponding to
             their underlying low-dimensional subspaces. However,
             low-dimensional intrinsic structures are often violated for
             real-world observations, as they can be corrupted by errors
             or deviate from ideal models. We propose to address this by
             learning a linear transformation on subspaces using matrix
             rank, via its convex surrogate nuclear norm, as the
             optimization criterion. The learned linear transformation
             restores a low-rank structure for data from the same
             subspace, and, at the same time, forces a high-rank
             structure for data from different subspaces. In this way, we
             reduce variations within the subspaces, and increase
             separations between the subspaces for more accurate subspace
             clustering. This proposed learned robust subspace clustering
             framework significantly enhances the performance of existing
             subspace clustering methods. To exploit the low-rank
             structures of the transformed subspaces, we further
             introduce a subspace clustering technique, called Robust
             Sparse Subspace Clustering, which efficiently combines
             robust PCA with sparse modeling. We also discuss the online
             learning of the transformation, and learning of the
             transformation while simultaneously reducing the data
             dimensionality. Extensive experiments using public datasets
             are presented, showing that the proposed approach
             significantly outperforms state-of-the-art subspace
             clustering methods.},
   Key = {fds304059}
}

@article{fds264741,
   Author = {Caruyer, E and Lenglet, C and Sapiro, G and Deriche,
             R},
   Title = {Design of multishell sampling schemes with uniform coverage
             in diffusion MRI.},
   Journal = {Magnetic resonance in medicine},
   Volume = {69},
   Number = {6},
   Pages = {1534-1540},
   Year = {2013},
   Month = {June},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23625329},
   Abstract = {Purpose: In diffusion MRI, a technique known as diffusion
             spectrum imaging reconstructs the propagator with a discrete
             Fourier transform, from a Cartesian sampling of the diffusion
             signal. Alternatively, it is possible to directly reconstruct
             the orientation distribution function in q-ball imaging,
             providing so-called high angular resolution diffusion
             imaging. In between these two techniques, acquisitions on
             several spheres in q-space offer an interesting trade-off
             between the angular resolution and the radial information
             gathered in diffusion MRI. A careful design is central to the
             success of multishell acquisition and reconstruction
             techniques. Methods: The design of multishell acquisitions,
             however, is still an open and active field of research. In
             this work, we provide a general method to design multishell
             acquisitions with uniform angular coverage, based on a
             generalization of electrostatic repulsion to the multishell
             setting. Results: Using simulations, we evaluate the impact
             of our method on the angular resolution in one- and
             two-fiber-bundle configurations. Compared to more commonly
             used radial sampling, we show that our method improves the
             angular resolution, as well as fiber crossing
             discrimination. Discussion: We propose a novel method to
             design sampling schemes with optimal angular coverage and
             show the positive impact on angular resolution in diffusion
             MRI.},
   Doi = {10.1002/mrm.24736},
   Key = {fds264741}
}
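
The single-shell electrostatic construction that this work generalizes can be sketched with projected gradient descent on a Coulomb-like energy, including the antipodal symmetry of diffusion directions (step size and iteration count are illustrative; the multishell scheme adds per-shell and cross-shell terms of the same form):

    import numpy as np

    def repulsion_dirs(n, iters=500, lr=5e-3, seed=0):
        # Spread n unit vectors by descending sum_{i,j} 1/||p_i - q_j||,
        # where q_j ranges over the points and their antipodes.
        rng = np.random.default_rng(seed)
        P = rng.standard_normal((n, 3))
        P /= np.linalg.norm(P, axis=1, keepdims=True)
        for _ in range(iters):
            Q = np.vstack([P, -P])                 # antipodal charges
            D = P[:, None, :] - Q[None, :, :]
            r = np.linalg.norm(D, axis=2)
            np.fill_diagonal(r[:, :n], np.inf)     # no self-interaction...
            np.fill_diagonal(r[:, n:], np.inf)     # ...nor with own antipode
            P += lr * (D / r[..., None] ** 3).sum(axis=1)  # repulsive step
            P /= np.linalg.norm(P, axis=1, keepdims=True)  # back to sphere
        return P

    dirs = repulsion_dirs(30)  # 30 directions with near-uniform coverage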

@article{fds304060,
   Author = {Spina, TV and Tepper, M and Esler, A and Morellas, V and Papanikolopoulos, N and Falcão, AX and Sapiro,
             G},
   Title = {Video Human Segmentation using Fuzzy Object Models and its
             Application to Body Pose Estimation of Toddlers for Behavior
             Studies},
   Year = {2013},
   Month = {May},
   url = {http://arxiv.org/abs/1305.6918v1},
   Abstract = {Video object segmentation is a challenging problem due to
             the presence of deformable, connected, and articulated
             objects, intra- and inter-object occlusions, object motion,
             and poor lighting. Some of these challenges call for object
             models that can locate a desired object and separate it from
             its surrounding background, even when both share similar
             colors and textures. In this work, we extend a fuzzy object
             model, named cloud system model (CSM), to handle video
             segmentation, and evaluate it for body pose estimation of
             toddlers at risk of autism. CSM has been successfully used
             to model the parts of the brain (cerebrum, left and right
             brain hemispheres, and cerebellum) in order to automatically
             locate and separate them from each other, the connected
             brain stem, and the background in 3D MR-images. In our case,
             the objects are articulated parts (2D projections) of the
             human body, which can deform, cause self-occlusions, and
             move along the video. The proposed CSM extension handles
             articulation by connecting the individual clouds, body
             parts, of the system using a 2D stickman model. The stickman
             representation naturally allows us to extract 2D body pose
             measures of arm asymmetry patterns during unsupported gait
             of toddlers, a possible behavioral marker of autism. The
             results show that our method can provide insightful
             knowledge to assist the specialist's observations during
             real in-clinic assessments.},
   Key = {fds304060}
}
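
Given the 2D joint positions exposed by the stickman fit, a per-frame arm-asymmetry score of the kind described above takes only a few lines; a toy sketch, with hypothetical joint names:

    import numpy as np

    def arm_asymmetry_deg(pose):
        # pose: dict of 2D joint coordinates; the keys ('l_shoulder',
        # 'l_wrist', ...) are assumptions, not the paper's interface.
        # Returns the gap between left and right arm angles in degrees.
        def angle(side):
            s = np.asarray(pose[side + '_shoulder'], float)
            w = np.asarray(pose[side + '_wrist'], float)
            d = w - s
            return np.degrees(np.arctan2(d[1], d[0]))
        return abs(abs(angle('l')) - abs(angle('r')))

    print(arm_asymmetry_deg({'l_shoulder': (100, 50), 'l_wrist': (130, 95),
                             'r_shoulder': (60, 50), 'r_wrist': (20, 60)}))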

@article{fds264872,
   Author = {Su, S and White, T and Schmidt, M and Kao, C-Y and Sapiro,
             G},
   Title = {Geometric computation of human gyrification indexes from
             magnetic resonance images.},
   Journal = {Human brain mapping},
   Volume = {34},
   Number = {5},
   Pages = {1230-1244},
   Year = {2013},
   Month = {May},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22331577},
   Abstract = {Human brains are highly convoluted surfaces with multiple
             folds. To characterize the complexity of these folds and
             their relationship with neurological and psychiatric
             conditions, different techniques have been developed to
             quantify the folding patterns, also known as the surface
             complexity or gyrification of the brain. In this study, the
             authors propose a new geometric approach to measure the
             gyrification of human brains from magnetic resonance images.
             This approach is based on intrinsic 3D measurements that
             relate the local brain surface area to the corresponding
             area of a tightly wrapped sheet. The authors also present an
             adaptation of this technique in which the geodesic depth is
             incorporated into the gyrification computation. These
             gyrification measures are efficiently and accurately
             computed by solving geometric partial differential
             equations. The presentation of the geometric framework is
             complemented with experimental results for brain complexity
             in typically developing children and adolescents. Using this
             novel approach, the authors provide evidence for a gradual
             decrease in brain surface complexity throughout childhood
             and adolescence. These developmental differences occur
             earlier in the occipital lobe and move anterior as children
             progress into young adulthood.},
   Doi = {10.1002/hbm.21510},
   Key = {fds264872}
}
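
A crude global analogue of the measure above, total cortical mesh area over the area of a tightly wrapped sheet, here approximated by the convex hull, can be sketched as follows (the paper's local version, incorporating geodesic depth, instead solves geometric PDEs):

    import numpy as np
    from scipy.spatial import ConvexHull

    def mesh_area(verts, faces):
        # Total area of a triangle mesh: verts (V, 3), faces (F, 3) indices.
        a = verts[faces[:, 1]] - verts[faces[:, 0]]
        b = verts[faces[:, 2]] - verts[faces[:, 0]]
        return 0.5 * np.linalg.norm(np.cross(a, b), axis=1).sum()

    def global_gyrification_index(verts, faces):
        # Pial surface area over the surface area of its convex hull.
        return mesh_area(verts, faces) / ConvexHull(verts).area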

@article{fds304062,
   Author = {Llull, P and Liao, X and Yuan, X and Yang, J and Kittle, D and Carin, L and Sapiro, G and Brady, DJ},
   Title = {Coded aperture compressive temporal imaging.},
   Journal = {Optics express},
   Volume = {21},
   Number = {9},
   Pages = {10526-10545},
   Year = {2013},
   Month = {May},
   url = {http://arxiv.org/abs/1302.2575v1},
   Abstract = {We use mechanical translation of a coded aperture for code
             division multiple access compression of video. We discuss
             the compressed video's temporal resolution and present
             experimental results for reconstructions of > 10 frames of
             temporal data per coded snapshot.},
   Doi = {10.1364/oe.21.010526},
   Key = {fds304062}
}
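
The forward model behind these measurements, one snapshot integrating the scene through a translating binary code, is compact enough to sketch directly (sizes and code density are illustrative):

    import numpy as np

    def coded_snapshot(frames, mask, shifts):
        # frames: (T, H, W) high-speed video; mask: (H, W) binary aperture
        # code; shifts: per-subframe vertical translation of the code.
        # Returns the single (H, W) compressed measurement.
        y = np.zeros(frames.shape[1:])
        for t, s in enumerate(shifts):
            y += frames[t] * np.roll(mask, s, axis=0)
        return y

    rng = np.random.default_rng(0)
    T, H, W = 16, 64, 64
    frames = rng.random((T, H, W))
    mask = (rng.random((H, W)) < 0.5).astype(float)
    y = coded_snapshot(frames, mask, shifts=range(T))  # 16 frames -> 1 shot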

@article{fds264747,
   Author = {Harris, AK and Meyerson, JR and Matsuoka, Y and Kuybeda, O and Moran, A and Bliss, D and Das, SR and Yewdell, JW and Sapiro, G and Subbarao, K and Subramaniam, S},
   Title = {Structure and accessibility of HA trimers on intact 2009
             H1N1 pandemic influenza virus to stem region-specific
             neutralizing antibodies.},
   Journal = {Proceedings of the National Academy of Sciences of the
             United States of America},
   Volume = {110},
   Number = {12},
   Pages = {4592-4597},
   Year = {2013},
   Month = {March},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23460696},
   Abstract = {Rapid antigenic variation of HA, the major virion surface
             protein of influenza A virus, remains the principal
             challenge to the development of broader and more effective
             vaccines. Some regions of HA, such as the stem region
             proximal to the viral membrane, are nevertheless highly
             conserved across strains and among most subtypes. A
             fundamental question in vaccine design is the extent to
             which HA stem regions on the surface of the virus are
             accessible to broadly neutralizing antibodies. Here we
             report 3D structures derived from cryoelectron tomography of
             HA on intact 2009 H1N1 pandemic virions in the presence and
             absence of the antibody C179, which neutralizes viruses
             expressing a broad range of HA subtypes, including H1, H2,
             H5, H6, and H9. By fitting previously derived
             crystallographic structures of trimeric HA into the density
             maps, we deduced the locations of the molecular surfaces of
             HA involved in interaction with C179. Using computational
             methods to distinguish individual unliganded HA trimers from
             those that have bound C179 antibody, we demonstrate that
             ∼75% of HA trimers on the surface of the virus have C179
             bound to the stem domain. Thus, despite their close packing
             on the viral membrane, the majority of HA trimers on intact
             virions are available to bind anti-stem antibodies that
             target conserved HA epitopes, establishing the feasibility
             of universal influenza vaccines that elicit such
             antibodies.},
   Doi = {10.1073/pnas.1214913110},
   Key = {fds264747}
}

@article{fds265109,
   Author = {Kuybeda, O and Frank, GA and Bartesaghi, A and Borgnia, M and Subramaniam, S and Sapiro, G},
   Title = {A collaborative framework for 3D alignment and
             classification of heterogeneous subvolumes in cryo-electron
             tomography.},
   Journal = {J Struct Biol},
   Volume = {181},
   Number = {2},
   Pages = {116-127},
   Year = {2013},
   Month = {February},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23110852},
   Abstract = {The limitation of using low electron doses in
             non-destructive cryo-electron tomography of biological
             specimens can be partially offset via averaging of aligned
             and structurally homogeneous subsets present in tomograms.
             This type of sub-volume averaging is especially challenging
             when multiple species are present. Here, we tackle the
             problem of conformational separation and alignment with a
             "collaborative" approach designed to reduce the effect of
             the "curse of dimensionality" encountered in standard
             pair-wise comparisons. Our new approach is based on using
             the nuclear norm as a collaborative similarity measure for
             alignment of sub-volumes, and by exploiting the presence of
             symmetry early in the processing. We provide a strict
             validation of this method by analyzing mixtures of intact
             simian immunodeficiency viruses SIV mac239 and SIV CP-MAC.
             Electron microscopic images of these two virus preparations
             are indistinguishable except for subtle differences in
             conformation of the envelope glycoproteins displayed on the
             surface of each virus particle. By using the nuclear
             norm-based, collaborative alignment method presented here,
             we demonstrate that the genetic identity of each virus
             particle present in the mixture can be assigned based solely
             on the structural information derived from single envelope
             glycoproteins displayed on the virus surface.},
   Doi = {10.1016/j.jsb.2012.10.010},
   Key = {fds265109}
}
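
The collaborative similarity used above is the nuclear norm of a matrix whose columns are the vectorized subvolumes under a candidate alignment; it is small when the whole set is structurally consistent, with no pair-wise comparisons. A minimal sketch:

    import numpy as np

    def collaborative_score(subvolumes):
        # subvolumes: equally-shaped 3D arrays already resampled under a
        # candidate alignment; lower scores mean better joint agreement.
        M = np.stack([v.ravel() for v in subvolumes], axis=1)
        M -= M.mean(axis=1, keepdims=True)      # remove the common offset
        return np.linalg.svd(M, compute_uv=False).sum()  # nuclear norm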

@article{fds311296,
   Author = {Duarte-Carvajalino, JM and Yu, G and Carin, L and Sapiro,
             G},
   Title = {Task-driven adaptive statistical compressive sensing of
             gaussian mixture models},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {61},
   Number = {3},
   Pages = {585-600},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2013},
   Month = {January},
   url = {http://arxiv.org/abs/1201.5404v1},
   Abstract = {A framework for adaptive and non-adaptive statistical
             compressive sensing is developed, where a statistical model
             replaces the standard sparsity model of classical
             compressive sensing. We propose within this framework
             optimal task-specific sensing protocols specifically and
             jointly designed for classification and reconstruction. A
             two-step adaptive sensing paradigm is developed, where
             online sensing is applied to detect the signal class in the
             first step, followed by a reconstruction step adapted to the
             detected class and the observed samples. The approach is
             based on information theory, here tailored for Gaussian
             mixture models (GMMs), where an information-theoretic
             objective relationship between the sensed signals and a
             representation of the specific task of interest is
             maximized. Experimental results using synthetic signals,
             Landsat satellite attributes, and natural images of
             different sizes and with different noise levels show the
             improvements achieved using the proposed framework when
             compared to more standard sensing protocols. The underlying
             formulation can be applied beyond GMMs, at the price of
             higher mathematical and computational complexity. ©
             1991-2012 IEEE.},
   Doi = {10.1109/TSP.2012.2225054},
   Key = {fds311296}
}
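
Under a GMM, both steps of the two-step paradigm reduce to closed-form Gaussian computations; a sketch with a fixed sensing matrix Phi (the paper additionally optimizes the sensing itself via an information-theoretic objective):

    import numpy as np
    from scipy.stats import multivariate_normal

    def classify_then_reconstruct(y, Phi, weights, means, covs, noise_var):
        # Model: y = Phi @ x + noise, with x drawn from a GMM.
        n = Phi.shape[0]
        marg = lambda S: Phi @ S @ Phi.T + noise_var * np.eye(n)
        # Step 1: most probable component given the compressed samples.
        logp = [np.log(w) + multivariate_normal.logpdf(y, Phi @ mu, marg(S))
                for w, mu, S in zip(weights, means, covs)]
        k = int(np.argmax(logp))
        # Step 2: Gaussian MMSE (Wiener) reconstruction for that component.
        mu, S = means[k], covs[k]
        x_hat = mu + S @ Phi.T @ np.linalg.solve(marg(S), y - Phi @ mu)
        return k, x_hat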

@article{fds264716,
   Author = {Elhamifar, E and Sapiro, G and Yang, A and Sastry,
             SS},
   Title = {A convex optimization framework for active
             learning},
   Journal = {Proceedings of the IEEE International Conference on Computer
             Vision},
   Pages = {209-216},
   Publisher = {IEEE},
   Year = {2013},
   Month = {January},
   url = {http://dx.doi.org/10.1109/ICCV.2013.33},
   Abstract = {In many image/video/web classification problems, we have
             access to a large number of unlabeled samples. However, it
             is typically expensive and time consuming to obtain labels
             for the samples. Active learning is the problem of
             progressively selecting and annotating the most informative
             unlabeled samples, in order to obtain a high classification
             performance. Most existing active learning algorithms select
             only one sample at a time prior to retraining the
             classifier. Hence, they are computationally expensive and
             cannot take advantage of parallel labeling systems such as
             Mechanical Turk. On the other hand, algorithms that allow
             the selection of multiple samples prior to retraining the
              classifier may select samples that have significant
              information overlap, or involve solving a non-convex
              optimization problem. More importantly, the majority of active
             learning algorithms are developed for a certain classifier
             type such as SVM. In this paper, we develop an efficient
             active learning framework based on convex programming, which
             can select multiple samples at a time for annotation. Unlike
             the state of the art, our algorithm can be used in
             conjunction with any type of classifiers, including those of
             the family of the recently proposed Sparse
             Representation-based Classification (SRC). We use the two
             principles of classifier uncertainty and sample diversity in
             order to guide the optimization program towards selecting
             the most informative unlabeled samples, which have the least
             information overlap. Our method can incorporate the data
             distribution in the selection process by using the
             appropriate dissimilarity between pairs of samples. We show
             the effectiveness of our framework in person detection,
             scene categorization and face recognition on real-world
             datasets. © 2013 IEEE.},
   Doi = {10.1109/ICCV.2013.33},
   Key = {fds264716}
}
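
As a simplified stand-in for the convex program (not the paper's formulation), the same two ingredients, classifier uncertainty and sample diversity, already drive a useful greedy batch selector:

    import numpy as np

    def select_batch(uncertainty, dissimilarity, k, trade_off=1.0):
        # uncertainty: (N,) classifier confusion per unlabeled sample;
        # dissimilarity: (N, N) pairwise distances; k: batch size.
        chosen = [int(np.argmax(uncertainty))]
        while len(chosen) < k:
            # diversity = distance to the closest already-chosen sample
            diversity = dissimilarity[:, chosen].min(axis=1)
            score = uncertainty + trade_off * diversity
            score[chosen] = -np.inf        # never re-pick a sample
            chosen.append(int(np.argmax(score)))
        return chosen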

@article{fds264717,
   Author = {Fiori, M and Sprechmann, P and Vogelstein, J and Musé, P and Sapiro,
             G},
   Title = {Robust multimodal graph matching: Sparse coding meets graph
             matching},
   Journal = {Advances in Neural Information Processing
             Systems},
   Year = {2013},
   Month = {January},
   ISSN = {1049-5258},
   Abstract = {Graph matching is a challenging problem with very important
             applications in a wide range of fields, from image and video
             analysis to biological and biomedical problems. We propose a
             robust graph matching algorithm inspired in sparsity-related
             techniques. We cast the problem, resembling group or
             collaborative sparsity formulations, as a non-smooth convex
             optimization problem that can be efficiently solved using
             augmented Lagrangian techniques. The method can deal with
             weighted or unweighted graphs, as well as multimodal data,
             where different graphs represent different types of data.
             The proposed approach is also naturally integrated with
             collaborative graph inference techniques, solving general
             network inference problems where the observed variables,
             possibly coming from different modalities, are not in
             correspondence. The algorithm is tested and compared with
             state-of-the-art graph matching techniques in both synthetic
             and real graphs. We also present results on multimodal
             graphs and applications to collaborative inference of brain
             connectivity from alignment-free functional magnetic
             resonance imaging (fMRI) data. The code is publicly
             available.},
   Key = {fds264717}
}

@article{fds264718,
   Author = {Sprechmann, P and Litman, R and Ben Yakar and T and Bronstein, A and Sapiro, G},
   Title = {Efficient supervised sparse analysis and synthesis
             operators},
   Journal = {Advances in Neural Information Processing
             Systems},
   Year = {2013},
   Month = {January},
   ISSN = {1049-5258},
   Abstract = {In this paper, we propose a new computationally efficient
             framework for learning sparse models. We formulate a unified
             approach that contains as particular cases models promoting
             sparse synthesis and analysis type of priors, and mixtures
             thereof. The supervised training of the proposed model is
             formulated as a bilevel optimization problem, in which the
             operators are optimized to achieve the best possible
             performance on a specific task, e.g., reconstruction or
             classification. By restricting the operators to be shift
             invariant, our approach can be thought as a way of learning
             sparsity-promoting convolutional operators. Leveraging
             recent ideas on fast trainable regressors designed to
             approximate exact sparse codes, we propose a way of
             constructing feed-forward networks capable of approximating
             the learned models at a fraction of the computational cost
             of exact solvers. In the shift-invariant case, this leads to
             a principled way of constructing a form of task-specific
             convolutional networks. We illustrate the proposed models on
             several experiments in music analysis and image processing
             applications.},
   Key = {fds264718}
}

@article{fds264721,
   Author = {Tepper, M and Sapiro, G},
   Title = {Fast L1 smoothing splines with an application to Kinect
             depth data},
   Journal = {2013 IEEE International Conference on Image Processing, ICIP
             2013 - Proceedings},
   Pages = {504-508},
   Publisher = {IEEE},
   Year = {2013},
   Month = {January},
   url = {http://dx.doi.org/10.1109/ICIP.2013.6738104},
   Abstract = {Splines are a popular and attractive way of smoothing noisy
             data. Computing splines involves minimizing a functional
             which is a linear combination of a fitting term and a
             regularization term. The former is classically computed
             using a (sometimes weighted) L2 norm while the latter
             ensures smoothness. In this work we propose to replace the
             L2 norm in the fitting term with an L1 norm, leading to
             automatic robustness to outliers. To solve the resulting
             minimization problem we propose an extremely simple and
             efficient numerical scheme based on split-Bregman iteration
             and a DCT-based filter. The algorithm is applied to the
              problem of smoothing and inpainting range data, where
             high-quality results are obtained in short processing times.
             © 2013 IEEE.},
   Doi = {10.1109/ICIP.2013.6738104},
   Key = {fds264721}
}
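
The scheme's two ingredients, a shrinkage (split-Bregman) step for the L1 fitting term and a DCT-domain filter for the smoothness term, can be sketched on a 1D signal; constants, boundary handling, and the fixed iteration count are illustrative:

    import numpy as np
    from scipy.fft import dct, idct

    def l1_smoothing_spline(f, s=10.0, mu=1.0, iters=100):
        # min_u ||u - f||_1 + s ||D2 u||_2^2 via split-Bregman with the
        # substitution d = u - f (b is the Bregman variable). The u-step,
        # (2 s D2'D2 + mu I) u = mu v, is diagonal in the DCT-II basis.
        n = len(f)
        lam = 2.0 - 2.0 * np.cos(np.pi * np.arange(n) / n)
        gamma = mu / (mu + 2.0 * s * lam ** 2)      # DCT-domain filter
        u, d, b = f.copy(), np.zeros(n), np.zeros(n)
        for _ in range(iters):
            v = f + d - b
            u = idct(gamma * dct(v, norm='ortho'), norm='ortho')
            r = u - f + b
            d = np.sign(r) * np.maximum(np.abs(r) - 1.0 / mu, 0)  # shrink
            b = r - d                                # Bregman update
        return u

    x = np.linspace(0, 1, 200)
    f = np.sin(4 * np.pi * x)
    f[::20] += 3.0               # sparse outliers: the L1 fit ignores them
    u = l1_smoothing_spline(f)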

@article{fds264722,
   Author = {Yang, J and Yuan, X and Liao, X and Llull, P and Sapiro, G and Brady, DJ and Carin, L},
   Title = {Gaussian mixture model for video compressive
             sensing},
   Journal = {2013 IEEE International Conference on Image Processing, ICIP
             2013 - Proceedings},
   Pages = {19-23},
   Publisher = {IEEE},
   Year = {2013},
   Month = {January},
   url = {http://dx.doi.org/10.1109/ICIP.2013.6738005},
   Abstract = {A Gaussian Mixture Model (GMM)-based algorithm is proposed
             for video reconstruction from temporal compressed
             measurements. The GMM is used to model spatio-temporal video
             patches, and the reconstruction can be efficiently computed
             based on analytic expressions. The developed GMM
             reconstruction method benefits from online adaptive learning
             and parallel computation. We demonstrate the efficacy of the
             proposed GMM with videos reconstructed from simulated
             compressive video measurements and from a real compressive
             video camera. © 2013 IEEE.},
   Doi = {10.1109/ICIP.2013.6738005},
   Key = {fds264722}
}

@article{fds264744,
   Author = {Harris, AK and Meyerson, JR and Matsuoka, Y and Kuybeda, O and Moran, A and Bliss, D and Das, SR and Yewdell, J and Sapiro, G and Subbarao, K and Subramaniam, S},
   Title = {Molecular Structures of Native HA Trimers on 2009 H1N1
             Pandemic Influenza Virus Complexed with Neutralizing
             Antibodies},
   Journal = {Biophysical Journal},
   Volume = {104},
   Number = {2},
   Pages = {414a-414a},
   Publisher = {Elsevier BV},
   Year = {2013},
   Month = {January},
   ISSN = {0006-3495},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000316074304111&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.1016/j.bpj.2012.11.2310},
   Key = {fds264744}
}

@article{fds264745,
   Author = {Frank, GA and Kuybeda, O and Bartesaghi, A and Borgnia, MJ and Sapiro,
             G and Subramaniam, S},
   Title = {Computational Separation of Conformational Heterogeneity
             using Cryo-Electron Tomography and 3D Sub-Volume
             Averaging},
   Journal = {Biophysical Journal},
   Volume = {104},
   Number = {2},
   Pages = {350a-351a},
   Publisher = {Elsevier BV},
   Year = {2013},
   Month = {January},
   ISSN = {0006-3495},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000316074303293&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.1016/j.bpj.2012.11.1947},
   Key = {fds264745}
}

@article{fds264746,
   Author = {Duarte-Carvajalino, JM and Sapiro, G and Harel, N and Lenglet,
             C},
   Title = {A Framework for Linear and Non-Linear Registration of
             Diffusion-Weighted MRIs Using Angular Interpolation.},
   Journal = {Frontiers in neuroscience},
   Volume = {7},
   Pages = {41},
   Year = {2013},
   Month = {January},
   ISSN = {1662-4548},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23596381},
   Abstract = {Registration of diffusion-weighted magnetic resonance images
             (DW-MRIs) is a key step for population studies, or
             construction of brain atlases, among other important tasks.
             Given the high dimensionality of the data, registration is
             usually performed by relying on scalar representative
             images, such as the fractional anisotropy (FA) and
             non-diffusion-weighted (b0) images, thereby ignoring much of
             the directional information conveyed by DW-MR datasets
              themselves. Alternatively, model-based registration algorithms
             have been proposed to exploit information on the preferred
             fiber orientation(s) at each voxel. Models such as the
             diffusion tensor or orientation distribution function (ODF)
             have been used for this purpose. Tensor-based registration
             methods rely on a model that does not completely capture the
             information contained in DW-MRIs, and largely depends on the
             accurate estimation of tensors. ODF-based approaches are
             more recent and computationally challenging, but also better
             describe complex fiber configurations thereby potentially
             improving the accuracy of DW-MRI registration. A new
             algorithm based on angular interpolation of the
             diffusion-weighted volumes was proposed for affine
             registration, and does not rely on any specific local
             diffusion model. In this work, we first extensively compare
             the performance of registration algorithms based on (i)
             angular interpolation, (ii) non-diffusion-weighted scalar
             volume (b0), and (iii) diffusion tensor image (DTI).
             Moreover, we generalize the concept of angular interpolation
             (AI) to non-linear image registration, and implement it in
             the FMRIB Software Library (FSL). We demonstrate that AI
             registration of DW-MRIs is a powerful alternative to volume
             and tensor-based approaches. In particular, we show that AI
             improves the registration accuracy in many cases over
             existing state-of-the-art algorithms, while providing
             registered raw DW-MRI data, which can be used for any
             subsequent analysis.},
   Doi = {10.3389/fnins.2013.00041},
   Key = {fds264746}
}

@article{fds264748,
   Author = {Caruyer, E and Aganj, I and Lenglet, C and Sapiro, G and Deriche,
             R},
   Title = {Motion Detection in Diffusion MRI via Online ODF
             Estimation.},
   Journal = {International journal of biomedical imaging},
   Volume = {2013},
   Pages = {849363},
   Year = {2013},
   Month = {January},
   ISSN = {1687-4188},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23509445},
   Abstract = {The acquisition of high angular resolution diffusion MRI is
             particularly long and subject motion can become an issue.
             The orientation distribution function (ODF) can be
             reconstructed online incrementally from diffusion-weighted
             MRI with a Kalman filtering framework. This online
             reconstruction provides real-time feedback throughout the
             acquisition process. In this article, the Kalman filter is
             first adapted to the reconstruction of the ODF in constant
             solid angle. Then, a method called STAR (STatistical
             Analysis of Residuals) is presented and applied to the
             online detection of motion in high angular resolution
             diffusion images. Compared to existing techniques, this
             method is image based and is built on top of a Kalman
             filter. Therefore, it introduces no additional scan time and
             does not require additional hardware. The performance of
             STAR is tested on simulated and real data and compared to
             the classical generalized likelihood ratio test. Successful
             detection of small motion is reported (rotation under 2°)
             with no delay and robustness to noise.},
   Doi = {10.1155/2013/849363},
   Key = {fds264748}
}
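
The online reconstruction is a linear Kalman filter over the ODF expansion coefficients, and its normalized innovations are the natural raw material for a residual test such as STAR; a sketch under a static-state model, where each row b holds the basis functions evaluated at one gradient direction:

    import numpy as np

    class OnlineODF:
        # Recursive (Kalman / RLS) update for coefficients c in
        # y_i = b_i . c + noise, with no process noise.
        def __init__(self, dim, prior_var=1e3, noise_var=1e-2):
            self.c = np.zeros(dim)
            self.P = prior_var * np.eye(dim)   # state covariance
            self.r = noise_var

        def update(self, b, y):
            b = np.asarray(b, float)
            innov = y - b @ self.c             # residual
            s = b @ self.P @ b + self.r        # innovation variance
            k = self.P @ b / s                 # Kalman gain
            self.c += k * innov
            self.P -= np.outer(k, b @ self.P)
            return innov / np.sqrt(s)          # feed to the motion test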

@article{fds264870,
   Author = {Zhan, L and Mueller, BA and Jahanshad, N and Jin, Y and Lenglet, C and Yacoub, E and Sapiro, G and Ugurbil, K and Harel, N and Toga, AW and Lim,
             KO and Thompson, PM},
   Title = {Magnetic resonance field strength effects on diffusion
             measures and brain connectivity networks.},
   Journal = {Brain connectivity},
   Volume = {3},
   Number = {1},
   Pages = {72-86},
   Year = {2013},
   Month = {January},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23205551},
   Abstract = {The quest to map brain connectivity is being pursued
             worldwide using diffusion imaging, among other techniques.
             Even so, we know little about how brain connectivity
             measures depend on the magnetic field strength of the
             scanner. To investigate this, we scanned 10 healthy subjects
              at 7 and 3 tesla, using 128-gradient high-angular resolution
             diffusion imaging. For each subject and scan, whole-brain
             tractography was used to estimate connectivity between 113
             cortical and subcortical regions. We examined how scanner
             field strength affects (i) the signal-to-noise ratio (SNR)
             of the non-diffusion-sensitized reference images (b(0));
             (ii) diffusion tensor imaging (DTI)-derived fractional
             anisotropy (FA), mean, radial, and axial diffusivity
             (MD/RD/AD), in atlas-defined regions; (iii) whole-brain
             tractography; (iv) the 113 × 113 brain connectivity maps;
             and (v) five commonly used network topology measures. We
             also assessed effects of the multi-channel reconstruction
             methods (sum-of-squares, SOS, at 7T; adaptive recombine, AC,
             at 3T). At 7T with SOS, the b0 images had 18.3% higher SNR
             than with 3T-AC. FA was similar for most regions of interest
             (ROIs) derived from an online DTI atlas (ICBM81), but higher
             at 7T in the cerebral peduncle and internal capsule. MD, AD,
             and RD were lower at 7T for most ROIs. The apparent fiber
             density between some subcortical regions was greater at
             7T-SOS than 3T-AC, with a consistent connection pattern
             overall. Suggesting the need for caution, the recovered
             brain network was apparently more efficient at 7T, which
             cannot be biologically true as the same subjects were
             assessed. Care is needed when comparing network measures
             across studies, and when interpreting apparently discrepant
             findings.},
   Doi = {10.1089/brain.2012.0114},
   Key = {fds264870}
}

@article{fds264871,
   Author = {Chen, B and Polatkan, G and Sapiro, G and Blei, D and Dunson, D and Carin,
             L},
   Title = {Deep Learning with Hierarchical Convolutional Factor
             Analysis.},
   Journal = {IEEE transactions on pattern analysis and machine
             intelligence},
   Year = {2013},
   Month = {January},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23319498},
   Abstract = {Unsupervised multi-layered ("deep") models are considered
             for general data, with a particular focus on imagery. The
             model is represented using a hierarchical convolutional
             factor-analysis construction, with sparse factor loadings
             and scores. The computation of layer-dependent model
             parameters is implemented within a Bayesian setting,
             employing a Gibbs sampler and variational Bayesian (VB)
             analysis, that explicitly exploit the convolutional nature
             of the expansion. In order to address large-scale and
             streaming data, an online version of VB is also developed.
             The number of basis functions or dictionary elements at each
             layer is inferred from the data, based on a beta-Bernoulli
             implementation of the Indian buffet process. Example results
             are presented for several image-processing applications,
             with comparisons to related models in the
             literature.},
   Key = {fds264871}
}

@article{fds304061,
   Author = {Yuan, X and Yang, J and Llull, P and Liao, X and Sapiro, G and Brady, DJ and Carin, L},
   Title = {Adaptive temporal compressive sensing for
             video},
   Journal = {2013 IEEE International Conference on Image Processing, ICIP
             2013 - Proceedings},
   Pages = {14-18},
   Year = {2013},
   Month = {January},
   url = {http://arxiv.org/abs/1302.3446v3},
   Abstract = {This paper introduces the concept of adaptive temporal
             compressive sensing (CS) for video. We propose a CS
             algorithm to adapt the compression ratio based on the
             scene's temporal complexity, computed from the compressed
             data, without compromising the quality of the reconstructed
             video. The temporal adaptivity is manifested by manipulating
             the integration time of the camera, opening the possibility
              of real-time implementation. The proposed algorithm is a
             generalized temporal CS approach that can be incorporated
             with a diverse set of existing hardware systems. © 2013
             IEEE.},
   Doi = {10.1109/ICIP.2013.6738004},
   Key = {fds304061}
}

@article{fds304064,
   Author = {Pokrass, J and Bronstein, AM and Bronstein, MM and Sprechmann, P and Sapiro, G},
   Title = {Sparse modeling of intrinsic correspondences},
   Journal = {Computer Graphics Forum},
   Volume = {32},
   Number = {2 PART4},
   Pages = {459-468},
   Publisher = {WILEY},
   Year = {2013},
   Month = {January},
   url = {http://arxiv.org/abs/1209.6560v1},
   Abstract = {We present a novel sparse modeling approach to non-rigid
             shape matching using only the ability to detect repeatable
             regions. As the input to our algorithm, we are given only
              two sets of regions in two shapes; no descriptors are
              provided, so the correspondence between the regions is not
              known, nor do we know how many regions correspond in the two
              shapes. We show that even with such scarce information, it
              is possible to establish very accurate correspondence
              between the shapes by using methods from the field of sparse
              modeling; this is the first non-trivial use of sparse
              models in shape correspondence. We formulate the problem of
             permuted sparse coding, in which we solve simultaneously for
             an unknown permutation ordering the regions on two shapes
             and for an unknown correspondence in functional
             representation. We also propose a robust variant capable of
             handling incomplete matches. Numerically, the problem is
             solved efficiently by alternating the solution of a linear
             assignment and a sparse coding problem. The proposed methods
             are evaluated qualitatively and quantitatively on standard
             benchmarks containing both synthetic and scanned objects. ©
             2013 The Eurographics Association and Blackwell Publishing
             Ltd.},
   Doi = {10.1111/cgf.12066},
   Key = {fds304064}
}
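
The alternation at the heart of permuted sparse coding can be sketched with an off-the-shelf assignment solver and plain ISTA as the sparse-coding step (a vanilla stand-in for the paper's solvers; dictionary and penalties are illustrative):

    import numpy as np
    from scipy.optimize import linear_sum_assignment

    def soft(z, t):
        return np.sign(z) * np.maximum(np.abs(z) - t, 0.0)

    def permuted_sparse_coding(y, D, lam=0.1, outer=10, inner=100):
        # min over permutation p and code x of
        #   sum_i (y[i] - (D x)[p[i]])^2 + lam ||x||_1,
        # alternating a linear assignment (p-step) with ISTA (x-step).
        n, k = D.shape
        p, x = np.arange(n), np.zeros(k)
        step = 1.0 / np.linalg.norm(D, 2) ** 2   # 1 / Lipschitz constant
        for _ in range(outer):
            y_un = np.empty(n)
            y_un[p] = y                          # undo current permutation
            for _ in range(inner):
                x = soft(x - step * (D.T @ (D @ x - y_un)), step * lam)
            cost = (y[:, None] - (D @ x)[None, :]) ** 2
            _, p = linear_sum_assignment(cost)   # optimal entry matching
        return p, x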

@article{fds345463,
   Author = {Yakar, TB and Litman, R and Sprechmann, P and Bronstein, A and Sapiro,
             G},
   Title = {Bilevel sparse models for polyphonic music
             transcription},
   Journal = {Proceedings of the 14th International Society for Music
             Information Retrieval Conference, ISMIR 2013},
   Pages = {65-70},
   Year = {2013},
   Month = {January},
   ISBN = {9780615900650},
   Abstract = {In this work, we propose a trainable sparse model for
             automatic polyphonic music transcription, which incorporates
             several successful approaches into a unified optimization
             framework. Our model combines unsupervised synthesis models
             similar to latent component analysis and nonnegative
             factorization with metric learning techniques that allow
             supervised discriminative learning. We develop efficient
             stochastic gradient training schemes allowing unsupervised,
              semi-, and fully supervised training of the model, as well
              as its adaptation to test data. We derive efficient
              fixed-complexity, fixed-latency approximations that can
              replace iterative minimization algorithms in time-critical
             applications. Experimental evaluation on synthetic and real
             data shows promising initial results.},
   Key = {fds345463}
}

@article{fds350556,
   Author = {Llull, P and Liao, X and Yuan, X and Yang, J and Kittle, D and Carin, L and Sapiro, G and Brady, DJ},
   Title = {Compressive sensing for video using a passive coding
             element},
   Journal = {Optics InfoBase Conference Papers},
   Year = {2013},
   Month = {January},
   ISBN = {9781557529756},
   url = {http://dx.doi.org/10.1364/cosi.2013.cm1c.3},
   Abstract = {We present a prototype system that utilizes mechanical
             translation of a passive coding element to compress
             high-speed temporal information into low-framerate video
             sequences. Reconstructions of 148 frames per experimental
             coded snapshot are reported. © OSA 2013.},
   Doi = {10.1364/cosi.2013.cm1c.3},
   Key = {fds350556}
}

@article{fds264704,
   Author = {Tang, Z and Tepper, M and Sapiro, G},
   Title = {Reflective Symmetry Detection by Rectifying Randomized
             Correspondences},
   Journal = {Procedings of the British Machine Vision Conference
             2013},
   Publisher = {British Machine Vision Association},
   Year = {2013},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000346352700112&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.5244/c.27.115},
   Key = {fds264704}
}

@article{fds265112,
   Author = {Asiaee T., A and Tepper, M and Banerjee, A and Sapiro,
             G},
   Title = {If you are happy and you know it... tweet},
   Journal = {ACM International Conference Proceeding Series},
   Pages = {1602-1606},
   Publisher = {ACM Press},
   Year = {2012},
   Month = {December},
   url = {http://dx.doi.org/10.1145/2396761.2398481},
   Abstract = {Extracting sentiment from Twitter data is one of the
             fundamental problems in social media analytics. Twitter's
             length constraint renders determining the positive/negative
             sentiment of a tweet difficult, even for a human judge. In
             this work we present a general framework for per-tweet (in
             contrast with batches of tweets) sentiment analysis which
             consists of: (1) extracting tweets about a desired target
             subject, (2) separating tweets with sentiment, and (3)
             setting apart positive from negative tweets. For each step,
             we study the performance of a number of classical and new
             machine learning algorithms. We also show that the intrinsic
             sparsity of tweets allows performing classification in a low
             dimensional space, via random projections, without losing
             accuracy. In addition, we present weighted variants of all
             employed algorithms, exploiting the available labeling
             uncertainty, which further improve classification accuracy.
             Finally, we show that spatially aggregating our per-tweet
             classification results produces a very satisfactory outcome,
             making our approach a good candidate for batch tweet
             sentiment analysis. © 2012 ACM.},
   Doi = {10.1145/2396761.2398481},
   Key = {fds265112}
}
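
The dimensionality-reduction step, classifying after a Johnson-Lindenstrauss-style random projection of sparse tweet features, can be sketched as follows; the synthetic features, labels, and nearest-centroid classifier are illustrative placeholders:

    import numpy as np

    def random_project(X, target_dim, seed=0):
        # Gaussian projection R^d -> R^target_dim, approximately
        # distance-preserving for sparse high-dimensional features.
        rng = np.random.default_rng(seed)
        R = rng.standard_normal((X.shape[1], target_dim)) / np.sqrt(target_dim)
        return X @ R

    def nearest_centroid(X_train, y_train, X_test):
        classes = np.unique(y_train)
        C = np.stack([X_train[y_train == c].mean(axis=0) for c in classes])
        d2 = ((X_test[:, None, :] - C[None, :, :]) ** 2).sum(-1)
        return classes[d2.argmin(axis=1)]

    rng = np.random.default_rng(1)
    X = (rng.random((1000, 5000)) < 0.002).astype(float)  # sparse "tweets"
    y = (X[:, :10].sum(axis=1) > 0).astype(int)           # synthetic labels
    Z = random_project(X, 50)
    pred = nearest_centroid(Z[:800], y[:800], Z[800:])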

@article{fds264734,
   Author = {Elhamifar, E and Sapiro, G and Vidal, R},
   Title = {Finding exemplars from pairwise dissimilarities via
             simultaneous sparse recovery},
   Journal = {Advances in Neural Information Processing
             Systems},
   Volume = {1},
   Pages = {19-27},
   Year = {2012},
   Month = {December},
   ISSN = {1049-5258},
   Abstract = {Given pairwise dissimilarities between data points, we
             consider the problem of finding a subset of data points,
             called representatives or exemplars, that can efficiently
             describe the data collection. We formulate the problem as a
             row-sparsity regularized trace minimization problem that can
             be solved efficiently using convex programming. The solution
             of the proposed optimization program finds the
             representatives and the probability that each data point is
             associated with each one of the representatives. We obtain
             the range of the regularization parameter for which the
             solution of the proposed optimization program changes from
             selecting one representative for all data points to
             selecting all data points as representatives. When data
             points are distributed around multiple clusters according to
             the dissimilarities, we show that the data points in each
             cluster select representatives only from that cluster.
             Unlike metric-based methods, our algorithm can be applied to
             dissimilarities that are asymmetric or violate the triangle
             inequality, i.e., it does not require that the pairwise
             dissimilarities come from a metric. We demonstrate the
             effectiveness of the proposed algorithm on synthetic data as
             well as real-world image and text data.},
   Key = {fds264734}
}
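
The row-sparsity regularized trace minimization transcribes almost verbatim into a generic convex solver; a sketch assuming cvxpy is available (the regularization weight and the survivor threshold are illustrative):

    import numpy as np
    import cvxpy as cp

    def find_exemplars(D, lam=1.0):
        # D: (N, N) pairwise dissimilarities (need not form a metric).
        # Z[i, j] ~ probability that point j is represented by point i;
        # the group penalty on rows drives most rows of Z to zero, and the
        # surviving rows index the exemplars.
        N = D.shape[0]
        Z = cp.Variable((N, N), nonneg=True)
        obj = cp.Minimize(cp.sum(cp.multiply(D, Z)) +
                          lam * cp.sum(cp.norm(Z, 2, axis=1)))
        cp.Problem(obj, [cp.sum(Z, axis=0) == 1]).solve()
        energy = np.linalg.norm(Z.value, axis=1)
        return np.where(energy > 1e-3 * energy.max())[0]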

@article{fds264735,
   Author = {Fiori, M and Musé, P and Sapiro, G},
   Title = {Topology constraints in graphical models},
   Journal = {Advances in Neural Information Processing
             Systems},
   Volume = {1},
   Pages = {791-799},
   Year = {2012},
   Month = {December},
   ISSN = {1049-5258},
   Abstract = {Graphical models are a very useful tool to describe and
             understand natural phenomena, from gene expression to
             climate change and social interactions. The topological
             structure of these graphs/networks is a fundamental part of
             the analysis, and in many cases the main goal of the study.
             However, little work has been done on incorporating prior
             topological knowledge onto the estimation of the underlying
             graphical models from sample data. In this work we propose
             extensions to the basic joint regression model for network
             estimation, which explicitly incorporate graph-topological
             constraints into the corresponding optimization approach.
             The first proposed extension includes an eigenvector
             centrality constraint, thereby promoting this important
             prior topological property. The second developed extension
             promotes the formation of certain motifs, triangle-shaped
             ones in particular, which are known to exist for example in
             genetic regulatory networks. The presentation of the
             underlying formulations, which serve as examples of the
             introduction of topological constraints in network
             estimation, is complemented with examples in diverse
             datasets demonstrating the importance of incorporating such
             critical prior knowledge.},
   Key = {fds264735}
}

@article{fds264742,
   Author = {Tepper, M and Sapiro, G},
   Title = {Decoupled coarse-to-fine matching and nonlinear
             regularization for efficient motion estimation},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Pages = {1517-1520},
   Publisher = {IEEE},
   Year = {2012},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ICIP.2012.6467160},
   Abstract = {A simple motion estimation algorithm, light-weighted both in
             memory and in time, is presented in this paper. This
             simplicity is achieved by decoupling the matching and the
             regularization stages in the estimation process. Experiments
             show that the obtained results are comparable with
             state-of-the-art algorithms that are much more
             computationally demanding. © 2012 IEEE.},
   Doi = {10.1109/ICIP.2012.6467160},
   Key = {fds264742}
}

@article{fds264830,
   Author = {Fasching, J and Walczak, N and Sivalingam, R and Cullen, K and Murphy,
             B and Sapiro, G and Morellas, V and Papanikolopoulos,
             N},
   Title = {Detecting risk-markers in children in a preschool
             classroom},
   Journal = {IEEE International Conference on Intelligent Robots and
             Systems},
   Pages = {1010-1016},
   Publisher = {IEEE},
   Year = {2012},
   Month = {December},
   ISBN = {9781467317375},
   url = {http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=6363628},
   Abstract = {Early intervention in mental disorders can dramatically
             increase an individual's quality of life. Additionally, when
             symptoms of mental illness appear in childhood or
             adolescence, they represent the later stages of a process
             that began years earlier. One goal of psychiatric research
             is to identify risk-markers: genetic, neural, behavioral
             and/or social deviations that indicate elevated risk of a
             particular mental disorder. Ideally, screening of
             risk-markers should occur in a community setting, and not a
             clinical setting which may be time-consuming and
             resource-intensive. Given this situation, a system for
             automatically detecting risk-markers in children would be
             highly valuable. In this paper, we describe such a system
             that has been installed at the Shirley G. Moore Lab School,
             a research pre-school at the University of Minnesota. This
             system consists of multiple RGB+D sensors and is able to
             detect children and adults in the classroom, tracking them
             as they move around the room. We use the tracking results to
             extract high-level information about the behavior and social
             interaction of children, that can then be used to screen for
             early signs of mental disorders. © 2012
             IEEE.},
   Doi = {10.1109/IROS.2012.6385732},
   Key = {fds264830}
}

@article{fds264832,
   Author = {Hashemi, J and Spina, TV and Tepper, M and Esler, A and Morellas, V and Papanikolopoulos, N and Sapiro, G},
   Title = {A computer vision approach for the assessment of
             autism-related behavioral markers},
   Journal = {2012 IEEE International Conference on Development and
             Learning and Epigenetic Robotics, ICDL 2012},
   Pages = {1-7},
   Publisher = {IEEE},
   Year = {2012},
   Month = {December},
   ISBN = {9781467349635},
   url = {http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=6384412},
   Abstract = {The early detection of developmental disorders is key to
             child outcome, allowing interventions to be initiated that
             promote development and improve prognosis. Research on
             autism spectrum disorder (ASD) suggests behavioral markers
             can be observed late in the first year of life. Many of
             these studies involved extensive frame-by-frame video
             observation and analysis of a child's natural behavior.
             Although non-intrusive, these methods are extremely
             time-intensive and require a high level of observer
             training; thus, they are impractical for clinical purposes.
             Diagnostic measures for ASD are available for infants but
             are only accurate when used by specialists experienced in
             early diagnosis. This work is a first milestone in a
             long-term multidisciplinary project that aims at helping
             clinicians and general practitioners accomplish this early
             detection/measurement task automatically. We focus on
             providing computer vision tools to measure and identify ASD
             behavioral markers based on components of the Autism
             Observation Scale for Infants (AOSI). In particular, we
             develop algorithms to measure three critical AOSI activities
             that assess visual attention. We augment these AOSI
             activities with an additional test that analyzes
             asymmetrical patterns in unsupported gait. The first set of
             algorithms involves assessing head motion by facial feature
             tracking, while the gait analysis relies on joint foreground
             segmentation and 2D body pose estimation in video. We show
             results that provide insightful knowledge to augment the
             clinician's behavioral observations obtained from real
             in-clinic assessments. © 2012 IEEE.},
   Doi = {10.1109/DevLrn.2012.6400865},
   Key = {fds264832}
}

@article{fds264836,
   Author = {Sprechmann, P and Bronstein, A and Sapiro, G},
   Title = {Real-time online singing voice separation from monaural
             recordings using robust low-rank modeling},
   Journal = {Proceedings of the 13th International Society for Music
             Information Retrieval Conference, ISMIR 2012},
   Pages = {67-72},
   Publisher = {FEUP Edições},
   Editor = {Gouyon, F and Herrera, P and Martins, LG and Müller,
             M},
   Year = {2012},
   Month = {December},
   ISBN = {9789727521449},
   url = {http://www.informatik.uni-trier.de/~ley/db/conf/ismir/ismir2012.html},
   Abstract = {Separating the leading vocals from the musical accompaniment
             is a challenging task that appears naturally in several
             music processing applications. Robust principal component
             analysis (RPCA) has been recently employed to this problem
             producing very successful results. The method decomposes the
             signal into a low-rank component corresponding to the
             accompaniment with its repetitive structure, and a sparse
             component corresponding to the voice with its quasi-harmonic
             structure. In this paper we first introduce a non-negative
              variant of RPCA, termed robust low-rank non-negative
             matrix factorization (RNMF). This new framework better suits
             audio applications. We then propose two efficient
             feed-forward architectures that approximate the RPCA and
             RNMF with low latency and a fraction of the complexity of
             the original optimization method. These approximants allow
             incorporating elements of unsupervised, semi- and
             fully-supervised learning into the RPCA and RNMF frameworks.
             Our basic implementation shows several orders of magnitude
             speedup compared to the exact solvers with no performance
             degradation, and allows online and faster-than-real-time
             processing. Evaluation on the MIR-1K dataset demonstrates
             state-of-the-art performance. © 2012 International Society
             for Music Information Retrieval.},
   Key = {fds264836}
}
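
The exact batch solver that the feed-forward approximants replace is standard; a minimal inexact-ALM RPCA sketch on a magnitude spectrogram M (the non-negative RNMF variant and the trainable approximations are not shown):

    import numpy as np

    def rpca(M, lam=None, mu=None, iters=200):
        # Decompose M ~ L (low rank: accompaniment) + S (sparse: voice).
        m, n = M.shape
        lam = lam or 1.0 / np.sqrt(max(m, n))
        mu = mu or 1.25 / np.linalg.norm(M, 2)
        Y, S = np.zeros_like(M), np.zeros_like(M)   # dual / sparse parts
        for _ in range(iters):
            # L-step: singular value thresholding
            U, s, Vt = np.linalg.svd(M - S + Y / mu, full_matrices=False)
            L = (U * np.maximum(s - 1.0 / mu, 0.0)) @ Vt
            # S-step: entrywise shrinkage
            R = M - L + Y / mu
            S = np.sign(R) * np.maximum(np.abs(R) - lam / mu, 0.0)
            Y += mu * (M - L - S)                   # dual ascent
        return L, S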

@article{fds264841,
   Author = {Castrodad, A and Khuon, T and Rand, R and Sapiro,
             G},
   Title = {Sparse modeling for hyperspectral imagery with LiDAR data
             fusion for subpixel mapping},
   Journal = {International Geoscience and Remote Sensing Symposium
             (IGARSS)},
   Pages = {7275-7278},
   Publisher = {IEEE},
   Year = {2012},
   Month = {December},
   ISBN = {9781467311601},
   url = {http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=6334512},
   Abstract = {Several studies suggest that the use of geometric features
             along with spectral information improves the classification
             and visualization quality of hyperspectral imagery. These
             studies normally make use of spatial neighborhoods of
             hyperspectral pixels for extracting these geometric
             features. In this work, we merge point cloud Light Detection
             and Ranging (LiDAR) data and hyperspectral imagery (HSI)
             into a single sparse modeling pipeline for subpixel mapping
             and classification. The model accounts for material
             variability and noise by using learned dictionaries that act
             as spectral endmembers. Additionally, the estimated
             abundances are influenced by the LiDAR point cloud density,
             particularly helpful in spectral mixtures involving partial
             occlusions and illumination changes caused by elevation
             differences. We demonstrate the advantages of the proposed
             algorithm with co-registered LiDAR-HSI data. © 2012
             IEEE.},
   Doi = {10.1109/IGARSS.2012.6351982},
   Key = {fds264841}
}

@article{fds265123,
   Author = {Bartesaghi, A and Lecumberry, F and Sapiro, G and Subramaniam,
             S},
   Title = {Protein secondary structure determination by constrained
             single-particle cryo-electron tomography.},
   Journal = {Structure (London, England : 1993)},
   Volume = {20},
   Number = {12},
   Pages = {2003-2013},
   Year = {2012},
   Month = {December},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23217682},
   Abstract = {Cryo-electron microscopy (cryo-EM) is a powerful technique
             for 3D structure determination of protein complexes by
             averaging information from individual molecular images. The
             resolutions that can be achieved with single-particle
             cryo-EM are frequently limited by inaccuracies in assigning
             molecular orientations based solely on 2D projection images.
             Tomographic data collection schemes, however, provide
             powerful constraints that can be used to more accurately
             determine molecular orientations necessary for 3D
             reconstruction. Here, we propose "constrained
             single-particle tomography" as a general strategy for 3D
             structure determination in cryo-EM. A key component of our
             approach is the effective use of images recorded in tilt
             series to extract high-resolution information and correct
             for the contrast transfer function. By incorporating
             geometric constraints into the refinement to improve
             orientational accuracy of images, we reduce model bias and
             overrefinement artifacts and demonstrate that protein
             structures can be determined at resolutions of ∼8 Å
             starting from low-dose tomographic tilt series.},
   Doi = {10.1016/j.str.2012.10.016},
   Key = {fds265123}
}

@article{fds304069,
   Author = {Michaeli, T and Eldar, YC and Sapiro, G},
   Title = {Semi-supervised single- and multi-domain regression with
             multi-domain training},
   Journal = {Information and Inference},
   Volume = {1},
   Number = {1},
   Pages = {68-97},
   Year = {2012},
   Month = {December},
   url = {http://arxiv.org/abs/1203.4422v1},
   Abstract = {We address the problems of multi- and single-domain
             regression based on distinct and unpaired labeled training
             sets for each of the domains and a large unlabeled training
set from all domains. We formulate these problems as ones
             of Bayesian estimation with partial knowledge of statistical
             relations. We propose a worst-case design strategy and study
             the resulting estimators. Our analysis explicitly accounts
             for the cardinality of the labeled sets and includes the
             special cases in which one of the labeled sets is very large
             or, in the other extreme, completely missing. We demonstrate
             our estimators in the context of removing expressions from
             facial images and in the context of audio-visual word
             recognition, and provide comparisons to several recently
             proposed multi-modal learning algorithms.},
   Doi = {10.1093/imaiai/ias003},
   Key = {fds304069}
}

@article{fds304063,
   Author = {Hashemi, J and Spina, TV and Tepper, M and Esler, A and Morellas, V and Papanikolopoulos, N and Sapiro, G},
   Title = {Computer vision tools for the non-invasive assessment of
             autism-related behavioral markers},
   Volume = {abs/1210.7014},
   Year = {2012},
   Month = {October},
   url = {http://arxiv.org/abs/1210.7014v2},
   Abstract = {The early detection of developmental disorders is key to
             child outcome, allowing interventions to be initiated that
             promote development and improve prognosis. Research on
             autism spectrum disorder (ASD) suggests behavioral markers
             can be observed late in the first year of life. Many of
             these studies involved extensive frame-by-frame video
             observation and analysis of a child's natural behavior.
             Although non-intrusive, these methods are extremely
             time-intensive and require a high level of observer
             training; thus, they are impractical for clinical and large
             population research purposes. Diagnostic measures for ASD
             are available for infants but are only accurate when used by
             specialists experienced in early diagnosis. This work is a
             first milestone in a long-term multidisciplinary project
             that aims at helping clinicians and general practitioners
             accomplish this early detection/measurement task
             automatically. We focus on providing computer vision tools
             to measure and identify ASD behavioral markers based on
             components of the Autism Observation Scale for Infants
             (AOSI). In particular, we develop algorithms to measure
             three critical AOSI activities that assess visual attention.
             We augment these AOSI activities with an additional test
             that analyzes asymmetrical patterns in unsupported gait. The
             first set of algorithms involves assessing head motion by
             tracking facial features, while the gait analysis relies on
             joint foreground segmentation and 2D body pose estimation in
video. We show results that provide insight to augment the
             clinician's behavioral observations obtained
             from real in-clinic assessments.},
   Key = {fds304063}
}

@article{fds265106,
   Author = {Ramírez, I and Sapiro, G},
   Title = {Low-rank data modeling via the minimum description length
             principle},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {2165-2168},
   Publisher = {IEEE},
   Year = {2012},
   Month = {October},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2012.6288341},
   Abstract = {Robust low-rank matrix estimation is a topic of increasing
             interest, with promising applications in a variety of
             fields, from computer vision to data mining and recommender
             systems. Recent theoretical results establish the ability of
             such data models to recover the true underlying low-rank
             matrix when a large portion of the measured matrix is either
             missing or arbitrarily corrupted. However, if low rank is
             not a hypothesis about the true nature of the data, but a
             device for extracting regularity from it, no current
             guidelines exist for choosing the rank of the estimated
             matrix. In this work we address this problem by means of the
             Minimum Description Length (MDL) principle - a well
             established information-theoretic approach to statistical
             inference - as a guideline for selecting a model for the
             data at hand. We demonstrate the practical usefulness of our
             formal approach with results for complex background
             extraction in video sequences. © 2012 IEEE.},
   Doi = {10.1109/ICASSP.2012.6288341},
   Key = {fds265106}
}
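
As a rough illustration of codelength-based rank selection (a toy two-part code, not the paper's exact coding scheme), the criterion can be evaluated from the singular values alone: bits to encode the rank-k residual under a Gaussian model plus bits to encode the k(m+n-k) model parameters.

    import numpy as np

    def mdl_rank(X):
        # Pick the rank minimizing a two-part description length of X.
        m, n = X.shape
        N = m * n
        s = np.linalg.svd(X, compute_uv=False)
        tail = np.cumsum(s[::-1] ** 2)[::-1]  # tail[k] = RSS of rank-k model
        best_k, best_bits = 0, np.inf
        for k in range(1, len(s)):
            rss = tail[k]
            if rss <= 0:
                return k  # exact fit
            data_bits = 0.5 * N * np.log2(rss / N)            # residual cost
            model_bits = 0.5 * k * (m + n - k) * np.log2(N)   # parameter cost
            if data_bits + model_bits < best_bits:
                best_k, best_bits = k, data_bits + model_bits
        return best_k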

@article{fds265107,
   Author = {Michaeli, T and Eldar, YC and Sapiro, G},
   Title = {Semi-supervised multi-domain regression with distinct
             training sets},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {2145-2148},
   Publisher = {IEEE},
   Year = {2012},
   Month = {October},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2012.6288336},
   Abstract = {We address the problems of multi-domain and single-domain
             regression based on distinct labeled training sets for each
             of the domains and a large unlabeled training set from all
             domains. We formulate these problems as ones of Bayesian
             estimation with partial knowledge of statistical relations.
             We propose a worst-case design strategy and study the
             resulting estimators. Our analysis explicitly accounts for
             the cardinality of the labeled sets and includes the special
             cases in which one of the labeled sets is very large or, in
             the other extreme, completely missing. We demonstrate our
             estimators in the context of audio-visual word recognition
             and provide comparisons to several recently proposed
             multi-modal learning algorithms. © 2012
             IEEE.},
   Doi = {10.1109/ICASSP.2012.6288336},
   Key = {fds265107}
}

@article{fds265108,
   Author = {Sprechmann, P and Cancela, P and Sapiro, G},
   Title = {Gaussian mixture models for score-informed instrument
             separation},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {49-52},
   Publisher = {IEEE},
   Year = {2012},
   Month = {October},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2012.6287814},
   Abstract = {A new framework for representing quasi-harmonic signals, and
its application to score-informed single-channel musical
             instrument separation, is introduced in this paper. In the
             proposed approach, the signal's pitch and spectral envelope
             are modeled separately. The model combines parametric
filters enforcing a harmonic structure in the
             representation, with Gaussian modeling for representing the
             spectral envelope. The estimation of the signal's model is
             cast as an inverse problem efficiently solved via a maximum
             a posteriori expectation-maximization algorithm. The
             relation of the proposed framework with common non-negative
             factorization methods is also discussed. The algorithm is
evaluated with both real and synthetic instrument mixtures,
             and comparisons with recently proposed techniques are
             presented. © 2012 IEEE.},
   Doi = {10.1109/ICASSP.2012.6287814},
   Key = {fds265108}
}

@article{fds265135,
   Author = {Duarte-Carvajalino, JM and Yu, G and Carin, L and Sapiro,
             G},
   Title = {Adapted statistical compressive sensing: Learning to sense
             gaussian mixture models},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {3653-3656},
   Publisher = {IEEE},
   Year = {2012},
   Month = {October},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2012.6288708},
   Abstract = {A framework for learning sensing kernels adapted to signals
             that follow a Gaussian mixture model (GMM) is introduced in
             this paper. This follows the paradigm of statistical
             compressive sensing (SCS), where a statistical model, a GMM
             in particular, replaces the standard sparsity model of
             classical compressive sensing (CS), leading to both
             theoretical and practical improvements. We show that the
             optimized sensing matrix outperforms random sampling
             matrices originally exploited both in CS and SCS. © 2012
             IEEE.},
   Doi = {10.1109/ICASSP.2012.6288708},
   Key = {fds265135}
}

@article{fds265105,
   Author = {Sprechmann, P and Bronstein, A and Sapiro, G},
   Title = {Learning efficient structured sparse models},
   Journal = {Proceedings of the 29th International Conference on Machine
             Learning, ICML 2012},
   Volume = {1},
   Pages = {615-622},
   Year = {2012},
   Month = {October},
   Abstract = {We present a comprehensive framework for structured sparse
             coding and modeling extending the recent ideas of using
             learnable fast regressors to approximate exact sparse codes.
             For this purpose, we propose an efficient feed forward
             architecture derived from the iteration of the
             block-coordinate algorithm. This architecture approximates
             the exact structured sparse codes with a fraction of the
             complexity of the standard optimization methods. We also
             show that by using different training objective functions,
             the proposed learnable sparse encoders are not only
             restricted to be approximants of the exact sparse code for a
             pre-given dictionary, but can be rather used as
             full-featured sparse encoders or even modelers. A simple
             implementation shows several orders of magnitude speedup
             compared to the state-of-the-art exact optimization
             algorithms at minimal performance degradation, making the
             proposed framework suitable for real time and large-scale
             applications. Copyright 2012 by the author(s)/owner(s).},
   Key = {fds265105}
}
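
The feed-forward encoders in this line of work unroll an iterative solver into a fixed number of layers whose weights can subsequently be trained. A minimal sketch for plain l(1) sparse coding is shown below (the structured case would replace the scalar soft-threshold with a block-wise one); the weights here are the fixed ISTA ones rather than learned, and all names are ours.

    import numpy as np

    def soft(z, t):
        return np.sign(z) * np.maximum(np.abs(z) - t, 0.0)

    def unrolled_ista(x, D, lam, T=10):
        # T-layer feed-forward approximation of
        #   min_z 0.5*||x - D z||^2 + lam*||z||_1
        Lc = np.linalg.norm(D, 2) ** 2            # Lipschitz const. of gradient
        W = D.T / Lc                              # "encoder" weights
        S = np.eye(D.shape[1]) - (D.T @ D) / Lc   # "recurrent" weights
        z = soft(W @ x, lam / Lc)
        for _ in range(T - 1):                    # each iteration = one layer
            z = soft(W @ x + S @ z, lam / Lc)
        return z

Training would then treat W, S and the threshold as free parameters fitted to a chosen objective, which is what turns the approximant into a full-featured encoder or modeler.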

@article{fds265104,
   Author = {Elhamifar, E and Sapiro, G and Vidal, R},
   Title = {See all by looking at a few: Sparse modeling for finding
             representative objects},
   Journal = {Proceedings of the IEEE Computer Society Conference on
             Computer Vision and Pattern Recognition},
   Pages = {1600-1607},
   Publisher = {IEEE},
   Year = {2012},
   Month = {October},
   ISSN = {1063-6919},
   url = {http://dx.doi.org/10.1109/CVPR.2012.6247852},
   Abstract = {We consider the problem of finding a few representatives for
             a dataset, i.e., a subset of data points that efficiently
             describes the entire dataset. We assume that each data point
             can be expressed as a linear combination of the
             representatives and formulate the problem of finding the
             representatives as a sparse multiple measurement vector
             problem. In our formulation, both the dictionary and the
             measurements are given by the data matrix, and the unknown
             sparse codes select the representatives via convex
             optimization. In general, we do not assume that the data are
             low-rank or distributed around cluster centers. When the
             data do come from a collection of low-rank models, we show
             that our method automatically selects a few representatives
             from each low-rank model. We also analyze the geometry of
             the representatives and discuss their relationship to the
             vertices of the convex hull of the data. We show that our
             framework can be extended to detect and reject outliers in
             datasets, and to efficiently deal with new observations and
             large datasets. The proposed framework and theoretical
             foundations are illustrated with examples in video
             summarization and image classification using
             representatives. © 2012 IEEE.},
   Doi = {10.1109/CVPR.2012.6247852},
   Key = {fds265104}
}
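
The convex program can be sketched with a row-sparsity (group-lasso) penalty solved by proximal gradient descent; for brevity the sketch below drops the affine constraint used in the paper and treats the regularization weight as given, so it is an illustrative variant rather than the authors' exact formulation.

    import numpy as np

    def find_representatives(X, lam=0.1, T=300):
        # min_C 0.5*||X - X C||_F^2 + lam * sum_i ||row_i(C)||_2
        # X is d x n, one data point per column; rows of C that survive
        # the group soft-threshold index the representatives.
        n = X.shape[1]
        G = X.T @ X
        step = 1.0 / np.linalg.norm(G, 2)
        C = np.zeros((n, n))
        for _ in range(T):
            C -= step * (G @ C - G)   # gradient of the fitting term
            norms = np.linalg.norm(C, axis=1, keepdims=True)
            C *= np.maximum(1.0 - step * lam / np.maximum(norms, 1e-12), 0.0)
        return np.where(np.linalg.norm(C, axis=1) > 1e-6)[0]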

@article{fds265121,
   Author = {Castrodad, A and Sapiro, G},
   Title = {Sparse modeling of human actions from motion
             imagery},
   Journal = {International Journal of Computer Vision},
   Volume = {100},
   Number = {1},
   Pages = {1-15},
   Publisher = {Springer Nature},
   Year = {2012},
   Month = {October},
   ISSN = {0920-5691},
   url = {http://dx.doi.org/10.1007/s11263-012-0534-7},
   Abstract = {An efficient sparse modeling pipeline for the classification
             of human actions from video is here developed.
             Spatio-temporal features that characterize local changes in
             the image are first extracted. This is followed by the
             learning of a class-structured dictionary encoding the
             individual actions of interest. Classification is then based
             on reconstruction, where the label assigned to each video
             comes from the optimal sparse linear combination of the
             learned basis vectors (action primitives) representing the
             actions. A low computational cost deep-layer model learning
             the inter-class correlations of the data is added for
             increasing discriminative power. In spite of its simplicity
             and low computational cost, the method outperforms
             previously reported results for virtually all standard
             datasets. © 2012 Springer Science+Business Media, LLC
             (outside the USA).},
   Doi = {10.1007/s11263-012-0534-7},
   Key = {fds265121}
}

@article{fds304068,
   Author = {Sprechmann, P and Bronstein, AM and Sapiro, G},
   Title = {Learning Robust Low-Rank Representations},
   Volume = {abs/1209.6393},
   Year = {2012},
   Month = {September},
   url = {http://arxiv.org/abs/1209.6393v1},
   Abstract = {In this paper we present a comprehensive framework for
             learning robust low-rank representations by combining and
             extending recent ideas for learning fast sparse coding
             regressors with structured non-convex optimization
             techniques. This approach connects robust principal
             component analysis (RPCA) with dictionary learning
             techniques and allows its approximation via trainable
             encoders. We propose an efficient feed-forward architecture
             derived from an optimization algorithm designed to exactly
             solve robust low dimensional projections. This architecture,
             in combination with different training objective functions,
             allows the regressors to be used as online approximants of
             the exact offline RPCA problem or as RPCA-based neural
             networks. Simple modifications of these encoders can handle
             challenging extensions, such as the inclusion of geometric
             data transformations. We present several examples with real
             data from image, audio, and video processing. When used to
             approximate RPCA, our basic implementation shows several
             orders of magnitude speedup compared to the exact solvers
             with almost no performance degradation. We show the strength
             of the inclusion of learning to the RPCA approach on a music
             source separation application, where the encoders outperform
             the exact RPCA algorithms, which are already reported to
             produce state-of-the-art results on a benchmark database.
             Our preliminary implementation on an iPad shows
             faster-than-real-time performance with minimal
             latency.},
   Key = {fds304068}
}

@article{fds265103,
   Author = {Ramírez, I and Sapiro, G},
   Title = {Universal regularizers for robust sparse coding and
             modeling.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {21},
   Number = {9},
   Pages = {3850-3864},
   Year = {2012},
   Month = {September},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22562754},
   Abstract = {Sparse data models, where data is assumed to be well
             represented as a linear combination of a few elements from a
             dictionary, have gained considerable attention in recent
             years, and their use has led to state-of-the-art results in
             many signal and image processing tasks. It is now well
             understood that the choice of the sparsity regularization
             term is critical in the success of such models. Based on a
             codelength minimization interpretation of sparse coding, and
             using tools from universal coding theory, we propose a
             framework for designing sparsity regularization terms which
             have theoretical and practical advantages when compared with
             the more standard l(0) or l(1) ones. The presentation of the
             framework and theoretical foundations is complemented with
             examples that show its practical advantages in image
             denoising, zooming and classification.},
   Doi = {10.1109/tip.2012.2197006},
   Key = {fds265103}
}

@article{fds304065,
   Author = {Tang, Z and Castrodad, A and Tepper, M and Sapiro,
             G},
   Title = {Are You Imitating Me? Unsupervised Sparse Modeling for Group
             Activity Analysis from a Single Video},
   Volume = {abs/1208.5451},
   Year = {2012},
   Month = {August},
   url = {http://arxiv.org/abs/1208.5451v1},
   Abstract = {A framework for unsupervised group activity analysis from a
             single video is here presented. Our working hypothesis is
             that human actions lie on a union of low-dimensional
             subspaces, and thus can be efficiently modeled as sparse
             linear combinations of atoms from a learned dictionary
             representing the action's primitives. Contrary to prior art,
             and with the primary goal of spatio-temporal action
grouping, in this work only a single video segment is
             available for both unsupervised learning and analysis
             without any prior training information. After extracting
             simple features at a single spatio-temporal scale, we learn
             a dictionary for each individual in the video during each
             short time lapse. These dictionaries allow us to compare the
             individuals' actions by producing an affinity matrix which
             contains sufficient discriminative information about the
             actions in the scene leading to grouping with simple and
             efficient tools. With diverse publicly available real
             videos, we demonstrate the effectiveness of the proposed
             framework and its robustness to cluttered backgrounds,
             changes of human appearance, and action variability.},
   Key = {fds304065}
}

@article{fds304070,
   Author = {Tepper, M and Sapiro, G},
   Title = {L1 Splines for Robust, Simple, and Fast Smoothing of Grid
             Data},
   Volume = {abs/1208.2292},
   Year = {2012},
   Month = {August},
   url = {http://arxiv.org/abs/1208.2292v2},
   Abstract = {Splines are a popular and attractive way of smoothing noisy
             data. Computing splines involves minimizing a functional
             which is a linear combination of a fitting term and a
             regularization term. The former is classically computed
             using a (weighted) L2 norm while the latter ensures
             smoothness. Thus, when dealing with grid data, the
             optimization can be solved very efficiently using the DCT.
             In this work we propose to replace the L2 norm in the
             fitting term with an L1 norm, leading to automatic
             robustness to outliers. To solve the resulting minimization
             problem we propose an extremely simple and efficient
             numerical scheme based on split-Bregman iteration combined
             with DCT. Experimental validation shows the high-quality
             results obtained in short processing times.},
   Key = {fds304070}
}
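
A minimal 1D sketch of the proposed scheme follows, assuming Neumann boundary conditions so that the second-difference penalty is diagonalized by the DCT (grid data in higher dimensions would use the multidimensional DCT in the same way); the parameter values are illustrative.

    import numpy as np
    from scipy.fft import dct, idct

    def l1_spline_1d(y, s=10.0, mu=1.0, iters=100):
        # Split-Bregman for: min_x |y - x|_1 + (s/2)*||D2 x||^2
        n = len(y)
        lam2 = (2.0 - 2.0 * np.cos(np.pi * np.arange(n) / n)) ** 2  # eig(D2'D2)
        d = np.zeros(n)
        b = np.zeros(n)
        for _ in range(iters):
            # x-update: (mu*I + s*D2'D2) x = mu*(y - d + b), solved in DCT space
            rhs = dct(y - d + b, norm='ortho')
            x = idct(mu * rhs / (mu + s * lam2), norm='ortho')
            # d-update: soft-threshold the fitting residual
            r = y - x + b
            d = np.sign(r) * np.maximum(np.abs(r) - 1.0 / mu, 0.0)
            b = r - d   # Bregman update: b += (y - x) - d
        return x

Each iteration costs two DCTs plus pointwise shrinkage, which is what keeps the scheme simple and fast.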

@article{fds264753,
   Author = {Abosch, A and Harel, N and Sapiro, G and Duchin, Y and Yacoub,
             E},
   Title = {178 Utility of 7T Imaging for Deep Brain Stimulation
             Surgery},
   Journal = {Neurosurgery},
   Volume = {71},
   Number = {2},
   Pages = {E569-E570},
   Publisher = {Ovid Technologies (Wolters Kluwer Health)},
   Year = {2012},
   Month = {August},
   ISSN = {0148-396X},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000307109300096&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.1227/01.neu.0000417768.55934.bf},
   Key = {fds264753}
}

@article{fds265100,
   Author = {Yatziv, L and Chartouni, M and Datta, S and Sapiro,
             G},
   Title = {Toward multiple catheters detection in fluoroscopic image
             guided interventions.},
   Journal = {IEEE transactions on information technology in biomedicine :
             a publication of the IEEE Engineering in Medicine and
             Biology Society},
   Volume = {16},
   Number = {4},
   Pages = {770-781},
   Year = {2012},
   Month = {July},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22389155},
   Abstract = {Catheters are routinely inserted via vessels to cavities of
the heart during fluoroscopic image-guided interventions for
             electrophysiology (EP) procedures such as ablation. During
             such interventions, the catheter undergoes nonrigid
             deformation due to physician interaction, patient's
             breathing, and cardiac motions. EP clinical applications can
             benefit from fast and accurate automatic catheter tracking
             in the fluoroscopic images. The typical low quality in
             fluoroscopic images and the presence of other medical
             instruments in the scene make the automatic detection and
             tracking of catheters in clinical environments very
             challenging. Toward the development of such an application,
             a robust and efficient method for detecting and tracking the
             catheter sheath is developed. The proposed approach exploits
             the clinical setup knowledge to constrain the search space
             while boosting both tracking speed and accuracy, and is
             based on a computationally efficient framework to trace the
             sheath and simultaneously detect one or multiple catheter
             tips. The algorithm is based on a modification of the fast
             marching weighted distance computation that efficiently
             calculates, on the fly, important geodesic properties in
             relevant regions of the image. This is followed by a cascade
             classifier for detecting the catheter tips. The proposed
             technique is validated on 1107 fluoroscopic images acquired
             on multiple patients across four different clinics,
             achieving multiple catheter tracking at a rate of 10
             images/s with a very low false positive rate of
             1.06.},
   Doi = {10.1109/titb.2012.2189407},
   Key = {fds265100}
}

@article{fds265120,
   Author = {Esser, E and Möller, M and Osher, S and Sapiro, G and Xin,
             J},
   Title = {A convex model for nonnegative matrix factorization and
             dimensionality reduction on physical space.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {21},
   Number = {7},
   Pages = {3239-3252},
   Year = {2012},
   Month = {July},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22410332},
   Abstract = {A collaborative convex framework for factoring a data matrix
             X into a nonnegative product AS , with a sparse coefficient
             matrix S, is proposed. We restrict the columns of the
             dictionary matrix A to coincide with certain columns of the
             data matrix X, thereby guaranteeing a physically meaningful
             dictionary and dimensionality reduction. We use l(1, ∞)
             regularization to select the dictionary from the data and
             show that this leads to an exact convex relaxation of l(0)
             in the case of distinct noise-free data. We also show how to
             relax the restriction-to- X constraint by initializing an
             alternating minimization approach with the solution of the
             convex model, obtaining a dictionary close to but not
             necessarily in X. We focus on applications of the proposed
             framework to hyperspectral endmember and abundance
             identification and also show an application to blind source
             separation of nuclear magnetic resonance
             data.},
   Doi = {10.1109/tip.2012.2190081},
   Key = {fds265120}
}

@article{fds265119,
   Author = {Ramirez, I and Sapiro, G},
   Title = {An MDL framework for sparse coding and dictionary
             learning},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {60},
   Number = {6},
   Pages = {2913-2927},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2012},
   Month = {June},
   ISSN = {1053-587X},
   url = {http://dx.doi.org/10.1109/TSP.2012.2187203},
   Abstract = {The power of sparse signal modeling with learned
             overcomplete dictionaries has been demonstrated in a variety
             of applications and fields, from signal processing to
             statistical inference and machine learning. However, the
             statistical properties of these models, such as underfitting
             or overfitting given sets of data, are still not well
             characterized in the literature. As a result, the success of
             sparse modeling depends on hand-tuning critical parameters
             for each data and application. This work aims at addressing
             this by providing a practical and objective characterization
             of sparse models by means of the minimum description length
(MDL) principle, a well-established information-theoretic
             approach to model selection in statistical inference. The
             resulting framework derives a family of efficient sparse
             coding and dictionary learning algorithms which, by virtue
             of the MDL principle, are completely parameter free.
Furthermore, the framework allows incorporating additional
             prior information into existing models, such as Markovian
             dependencies, or to define completely new problem
             formulations, including in the matrix analysis area, in a
             natural way. These virtues will be demonstrated with
             parameter-free algorithms for the classic image denoising
             and classification problems, and for low-rank matrix
             recovery in video applications. However, the framework is
not limited to imaging data, and can be applied to a
             wide range of signal and data types and tasks. © 2012
             IEEE.},
   Doi = {10.1109/TSP.2012.2187203},
   Key = {fds265119}
}

@article{fds265099,
   Author = {Walczak, N and Fasching, J and Toczyski, W and Sivalingam, R and Bird,
             N and Cullen, K and Morellas, V and Murphy, B and Sapiro, G and Papanikolopoulos, N},
   Title = {A nonintrusive system for behavioral analysis of children
             using multiple RGB+depth sensors},
   Journal = {Proceedings of IEEE Workshop on Applications of Computer
             Vision},
   Pages = {217-222},
   Publisher = {IEEE},
   Year = {2012},
   Month = {May},
   ISSN = {2158-3978},
   url = {http://dx.doi.org/10.1109/WACV.2012.6163011},
   Abstract = {In developmental disorders such as autism and schizophrenia,
             observing behavioral precursors in very early childhood can
             allow for early intervention and can improve patient
             outcomes. While such precursors open the possibility of
             broad and large-scale screening, until now they have been
             identified only through experts' painstaking examinations
             and their manual annotations of limited, unprocessed video
             footage. Here we introduce a system to automate and assist
             in such procedures. Employing multiple inexpensive real-time
             rgb+depth (rgb+d) sensors recording from multiple
viewpoints, our non-invasive system, now installed at the
             Shirley G. Moore Lab School (a research preschool), is being
             developed to monitor and reconstruct the play and
             interactions of preschoolers. The system's role is to help
             in assessing the growing volumes of its on-site recordings
             and to provide the data needed to uncover additional
             neuromotor behavioral markers via techniques such as data
             mining. © 2012 IEEE.},
   Doi = {10.1109/WACV.2012.6163011},
   Key = {fds265099}
}

@article{fds265115,
   Author = {Yu, G and Sapiro, G and Mallat, S},
   Title = {Solving inverse problems with piecewise linear estimators:
             from Gaussian mixture models to structured
             sparsity.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {21},
   Number = {5},
   Pages = {2481-2499},
   Year = {2012},
   Month = {May},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22180506},
   Abstract = {A general framework for solving image inverse problems with
             piecewise linear estimations is introduced in this paper.
             The approach is based on Gaussian mixture models, which are
             estimated via a maximum a posteriori expectation-maximization
             algorithm. A dual mathematical interpretation of the
             proposed framework with a structured sparse estimation is
             described, which shows that the resulting piecewise linear
             estimate stabilizes the estimation when compared with
             traditional sparse inverse problem techniques. We
             demonstrate that, in a number of image inverse problems,
             including interpolation, zooming, and deblurring of narrow
             kernels, the same simple and computationally efficient
algorithm yields results in the same ballpark as those of the
             state of the art.},
   Doi = {10.1109/tip.2011.2176743},
   Key = {fds265115}
}
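
The piecewise linear character of the estimator is easy to see in code: conditioned on a Gaussian component, the MAP estimate is a Wiener filter, and the active component is the one giving the observation the highest likelihood. A minimal sketch under a known degradation operator U and noise variance sigma2 (the paper estimates the mixture itself via MAP-EM, omitted here; all names are ours):

    import numpy as np

    def gmm_map_estimate(y, U, mus, Sigmas, sigma2):
        # y = U x + noise, x drawn from a GMM with components (mu_k, Sigma_k).
        m = len(y)
        best_x, best_ll = None, -np.inf
        for mu, Sig in zip(mus, Sigmas):
            C = U @ Sig @ U.T + sigma2 * np.eye(m)  # marginal covariance of y
            r = y - U @ mu
            sol = np.linalg.solve(C, r)
            x_hat = mu + Sig @ U.T @ sol            # per-component Wiener filter
            _, logdet = np.linalg.slogdet(C)
            ll = -0.5 * (r @ sol + logdet + m * np.log(2.0 * np.pi))
            if ll > best_ll:
                best_x, best_ll = x_hat, ll
        return best_x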

@article{fds265116,
   Author = {Mahmoudi, M and Sapiro, G},
   Title = {Sparse representations for range data restoration.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {21},
   Number = {5},
   Pages = {2909-2915},
   Year = {2012},
   Month = {May},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22287242},
   Abstract = {In this paper, the problem of denoising and occlusion
             restoration of 3-D range data based on dictionary learning
             and sparse representation methods is explored. We apply
             these techniques after converting the noisy 3-D surface into
             one or more images. We present experimental results on the
             proposed approaches.},
   Doi = {10.1109/tip.2012.2185940},
   Key = {fds265116}
}

@article{fds265117,
   Author = {Frank, GA and Bartesaghi, A and Kuybeda, O and Borgnia, MJ and White,
             TA and Sapiro, G and Subramaniam, S},
   Title = {Computational separation of conformational heterogeneity
             using cryo-electron tomography and 3D sub-volume
             averaging.},
   Journal = {J Struct Biol},
   Volume = {178},
   Number = {2},
   Pages = {165-176},
   Year = {2012},
   Month = {May},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22248450},
   Abstract = {We have previously used cryo-electron tomography combined
             with sub-volume averaging and classification to obtain 3D
             structures of macromolecular assemblies in cases where a
             single dominant species was present, and applied these
             methods to the analysis of a variety of trimeric HIV-1 and
             SIV envelope glycoproteins (Env). Here, we extend these
             studies by demonstrating automated, iterative, missing
             wedge-corrected 3D image alignment and classification
             methods to distinguish multiple conformations that are
             present simultaneously. We present a method for measuring
             the spatial distribution of the vector elements representing
             distinct conformational states of Env. We identify data
             processing strategies that allow clear separation of the
             previously characterized closed and open conformations, as
             well as unliganded and antibody-liganded states of Env when
             they are present in mixtures. We show that identifying and
             removing spikes with the lowest signal-to-noise ratios
             improves the overall accuracy of alignment between
             individual Env sub-volumes, and that alignment accuracy, in
             turn, determines the success of image classification in
             assessing conformational heterogeneity in heterogeneous
             mixtures. We validate these procedures for computational
             separation by successfully separating and reconstructing
             distinct 3D structures for unliganded and antibody-liganded
             as well as open and closed conformations of Env present
             simultaneously in mixtures.},
   Doi = {10.1016/j.jsb.2012.01.004},
   Key = {fds265117}
}

@article{fds265114,
   Author = {Aganj, I and Lenglet, C and Yacoub, E and Sapiro, G and Harel,
             N},
   Title = {A 3D wavelet fusion approach for the reconstruction of
             isotropic-resolution MR images from orthogonal
             anisotropic-resolution scans.},
   Journal = {Magnetic resonance in medicine},
   Volume = {67},
   Number = {4},
   Pages = {1167-1172},
   Year = {2012},
   Month = {April},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/21761448},
   Abstract = {Hardware constraints, scanning time limitations, patient
             movement, and signal-to-noise ratio (SNR) considerations,
             restrict the slice-selection and the in-plane resolutions of
             MRI differently, generally resulting in anisotropic voxels.
             This nonuniform sampling can be problematic, especially in
             image segmentation and clinical examination. To alleviate
             this, the acquisition is divided into (two or) three
             separate scans, with higher in-plane resolutions and thick
             slices, yet orthogonal slice-selection directions. In this
             work, a noniterative wavelet-based approach for combining
             the three orthogonal scans is adopted, and its advantages
             compared with other existing methods, such as Fourier
             techniques, are discussed, including the consideration of
             the actual pulse response of the MRI scanner, and its lower
             computational complexity. Experimental results are shown on
             simulated and real 7 T MRI data.},
   Doi = {10.1002/mrm.23086},
   Key = {fds265114}
}
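
The flavor of wavelet-domain fusion can be sketched with PyWavelets: once the orthogonal scans are interpolated to a common isotropic grid and co-registered, each subband coefficient is taken from whichever scan represents it best. The max-magnitude rule below is a generic fusion heuristic used for illustration, not necessarily the paper's exact selection rule.

    import numpy as np
    import pywt

    def wavelet_fuse(volumes, wavelet='db2'):
        # volumes: co-registered 3D arrays of identical shape, one per scan.
        coeff_dicts = [pywt.dwtn(v, wavelet) for v in volumes]
        fused = {}
        for key in coeff_dicts[0]:  # 'aaa', 'aad', ... subbands
            stack = np.stack([c[key] for c in coeff_dicts])
            pick = np.abs(stack).argmax(axis=0)  # which scan wins, per voxel
            fused[key] = np.take_along_axis(stack, pick[None], axis=0)[0]
        return pywt.idwtn(fused, wavelet)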

@article{fds264755,
   Author = {Sapiro, G},
   Title = {Autonomy Revisited: The Question of Mediations and its
             Methodological Implications},
   Journal = {Paragraph},
   Volume = {35},
   Number = {1},
   Pages = {30-48},
   Publisher = {Edinburgh University Press},
   Year = {2012},
   Month = {March},
   ISSN = {0264-8334},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000302670100003&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Abstract = {Bourdieu's concept of the literary field aimed to
             overcome the opposition between internal and external
             analysis of literary works. This paper examines its
             theoretical and methodological implications by exploring the
             notion of mediations between text and context at three
             different levels: the material conditions of production and
             circulation of literary works; the modalities of their
             production by their authors; their critical reception. It is
             through these mediations that the key concept of autonomy
             becomes operational for empirical research and that it
             displays its heuristic power, as illustrated by works using
             Bourdieu's theory of the literary field produced over the
last two decades.},
   Doi = {10.3366/para.2012.0040},
   Key = {fds264755}
}

@article{fds265134,
   Author = {Xing, Z and Zhou, M and Castrodad, A and Sapiro, G and Carin,
             L},
   Title = {Dictionary learning for noisy and incomplete hyperspectral
             images},
   Journal = {SIAM Journal on Imaging Sciences},
   Volume = {5},
   Number = {1},
   Pages = {33-56},
   Publisher = {Society for Industrial & Applied Mathematics
             (SIAM)},
   Year = {2012},
   Month = {February},
   ISSN = {1936-4954},
   url = {http://dx.doi.org/10.1137/110837486},
   Abstract = {We consider analysis of noisy and incomplete hyperspectral
             imagery, with the objective of removing the noise and
             inferring the missing data. The noise statistics may be
             wavelength dependent, and the fraction of data missing (at
             random) may be substantial, including potentially entire
             bands, offering the potential to significantly reduce the
             quantity of data that need be measured. To achieve this
             objective, the imagery is divided into contiguous
             three-dimensional (3D) spatio-spectral blocks of spatial
             dimension much less than the image dimension. It is assumed
             that each such 3D block may be represented as a linear
             combination of dictionary elements of the same dimension,
             plus noise, and the dictionary elements are learned in situ
             based on the observed data (no a priori training). The
             number of dictionary elements needed for representation of
             any particular block is typically small relative to the
             block dimensions, and all the image blocks are processed
             jointly ("collaboratively") to infer the underlying
             dictionary. We address dictionary learning from a Bayesian
             perspective, considering two distinct means of imposing
             sparse dictionary usage. These models allow inference of the
             number of dictionary elements needed as well as the
             underlying wavelength-dependent noise statistics. It is
             demonstrated that drawing the dictionary elements from a
             Gaussian process prior, imposing structure on the wavelength
             dependence of the dictionary elements, yields significant
             advantages, relative to the more conventional approach of
             using an independent and identically distributed Gaussian
             prior for the dictionary elements; this advantage is
             particularly evident in the presence of noise. The framework
             is demonstrated by processing hyperspectral imagery with a
             significant number of voxels missing uniformly at random,
             with imagery at specific wavelengths missing entirely, and
             in the presence of substantial additive noise. © 2012
             Society for Industrial and Applied Mathematics.},
   Doi = {10.1137/110837486},
   Key = {fds265134}
}

@article{fds265113,
   Author = {Duarte-Carvajalino, JM and Jahanshad, N and Lenglet, C and McMahon,
             KL and de Zubicaray, GI and Martin, NG and Wright, MJ and Thompson, PM and Sapiro, G},
   Title = {Hierarchical topological network analysis of anatomical
             human brain connectivity and differences related to sex and
             kinship.},
   Journal = {NeuroImage},
   Volume = {59},
   Number = {4},
   Pages = {3784-3804},
   Year = {2012},
   Month = {February},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22108644},
   Abstract = {Modern non-invasive brain imaging technologies, such as
             diffusion weighted magnetic resonance imaging (DWI), enable
             the mapping of neural fiber tracts in the white matter,
             providing a basis to reconstruct a detailed map of brain
             structural connectivity networks. Brain connectivity
             networks differ from random networks in their topology,
             which can be measured using small worldness, modularity, and
             high-degree nodes (hubs). Still, little is known about how
             individual differences in structural brain network
             properties relate to age, sex, or genetic differences.
             Recently, some groups have reported brain network biomarkers
             that enable differentiation among individuals, pairs of
             individuals, and groups of individuals. In addition to
             studying new topological features, here we provide a
             unifying general method to investigate topological brain
             networks and connectivity differences between individuals,
             pairs of individuals, and groups of individuals at several
             levels of the data hierarchy, while appropriately
             controlling false discovery rate (FDR) errors. We apply our
             new method to a large dataset of high quality brain
             connectivity networks obtained from High Angular Resolution
             Diffusion Imaging (HARDI) tractography in 303 young adult
             twins, siblings, and unrelated people. Our proposed approach
             can accurately classify brain connectivity networks based on
             sex (93% accuracy) and kinship (88.5% accuracy). We find
             statistically significant differences associated with sex
             and kinship both in the brain connectivity networks and in
             derived topological metrics, such as the clustering
             coefficient and the communicability matrix.},
   Doi = {10.1016/j.neuroimage.2011.10.096},
   Key = {fds265113}
}

@article{fds264842,
   Author = {Zhou, T and Shan, H and Banerjee, A and Sapiro, G},
   Title = {Kernelized probabilistic matrix factorization: Exploiting
             graphs and side information},
   Journal = {Proceedings of the 12th SIAM International Conference on
             Data Mining, SDM 2012},
   Pages = {403-414},
   Publisher = {SIAM / Omnipress},
   Year = {2012},
   Month = {January},
   ISBN = {9781611972320},
   url = {http://dx.doi.org/10.1137/1.9781611972825},
   Abstract = {We propose a new matrix completion algorithm, Kernelized
             Probabilistic Matrix Factorization (KPMF), which effectively
             incorporates external side information into the matrix
             factorization process. Unlike Probabilistic Matrix
             Factorization (PMF) [14], which assumes an independent
             latent vector for each row (and each column) with Gaussian
priors, KPMF works with latent vectors spanning all rows
             (and columns) with Gaussian Process (GP) priors. Hence, KPMF
             explicitly captures the underlying (nonlinear) covariance
             structures across rows and columns. This crucial difference
             greatly boosts the performance of KPMF when appropriate side
             information, e.g., users' social network in recommender
             systems, is incorporated. Furthermore, GP priors allow the
             KPMF model to fill in a row that is entirely missing in the
             original matrix based on the side information alone, which
is not feasible for the standard PMF formulation. In our paper,
             we mainly work on the matrix completion problem with a graph
             among the rows and/or columns as side information, but the
             proposed framework can be easily used with other types of
             side information as well. Finally, we demonstrate the
             efficacy of KPMF through two different applications: 1)
             recommender systems and 2) image restoration. Copyright ©
             2012 by the Society for Industrial and Applied
             Mathematics.},
   Doi = {10.1137/1.9781611972825.35},
   Key = {fds264842}
}
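
The KPMF MAP objective couples the latent factors across rows and across columns through kernel matrices and can be fit by plain gradient descent. A minimal sketch with illustrative hyperparameters (in the paper, the row-side kernel Ku would come, e.g., from a graph over users in a recommender system):

    import numpy as np

    def kpmf(R, mask, Ku, Kv, rank=5, lr=1e-3, sigma2=0.1, iters=500):
        # MAP fit of R ~ U V' with GP priors: columns of U coupled by Ku,
        # columns of V coupled by Kv; mask marks the observed entries.
        rng = np.random.default_rng(0)
        n, m = R.shape
        U = 0.1 * rng.standard_normal((n, rank))
        V = 0.1 * rng.standard_normal((m, rank))
        Ku_inv, Kv_inv = np.linalg.inv(Ku), np.linalg.inv(Kv)
        for _ in range(iters):
            E = mask * (U @ V.T - R)  # residual on observed entries only
            U -= lr * (E @ V / sigma2 + Ku_inv @ U)
            V -= lr * (E.T @ U / sigma2 + Kv_inv @ V)
        return U, V

Because the prior ties all rows together through Ku, a row of R with no observed entries still receives a meaningful estimate, which is the property the abstract highlights over standard PMF.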

@article{fds265101,
   Author = {Sivalingam, R and Cherian, A and Fasching, J and Walczak, N and Bird, N and Morellas, V and Murphy, B and Cullen, K and Lim, K and Sapiro, G and Papanikolopoulos, N},
   Title = {A multi-sensor visual tracking system for behavior
             monitoring of at-risk children},
   Journal = {Proceedings - IEEE International Conference on Robotics and
             Automation},
   Pages = {1345-1350},
   Publisher = {IEEE},
   Year = {2012},
   Month = {January},
   ISSN = {1050-4729},
   url = {http://dx.doi.org/10.1109/ICRA.2012.6225280},
   Abstract = {Clinical studies confirm that mental illnesses such as
             autism, Obsessive Compulsive Disorder (OCD), etc. show
             behavioral abnormalities even at very young ages; the early
             diagnosis of which can help steer effective treatments. Most
             often, the behavior of such at-risk children deviate in very
             subtle ways from that of a normal child; correct diagnosis
             of which requires prolonged and continuous monitoring of
             their activities by a clinician, which is a difficult and
             time intensive task. As a result, the development of
             automation tools for assisting in such monitoring activities
             will be an important step towards effective utilization of
             the diagnostic resources. In this paper, we approach the
             problem from a computer vision standpoint, and propose a
             novel system for the automatic monitoring of the behavior of
             children in their natural environment through the deployment
             of multiple non-invasive sensors (cameras and depth
             sensors). We provide details of our system, together with
             algorithms for the robust tracking of the activities of the
             children. Our experiments, conducted in the Shirley G. Moore
             Laboratory School, demonstrate the effectiveness of our
             methodology. © 2012 IEEE.},
   Doi = {10.1109/ICRA.2012.6225280},
   Key = {fds265101}
}

@article{fds265102,
   Author = {Tong, M and Kim, Y and Zhan, L and Sapiro, G and Lenglet, C and Mueller,
             BA and Thompson, PM and Vese, LA},
   Title = {A variational model for denoising high angular resolution
             diffusion imaging.},
   Journal = {Proceedings. IEEE International Symposium on Biomedical
             Imaging},
   Pages = {530-533},
   Year = {2012},
   Month = {January},
   ISSN = {1945-7928},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22902985},
   Abstract = {The presence of noise in High Angular Resolution Diffusion
             Imaging (HARDI) data of the brain can limit the accuracy
             with which fiber pathways of the brain can be extracted. In
             this work, we present a variational model to denoise HARDI
             data corrupted by Rician noise. Numerical experiments are
             performed on three types of data: 2D synthetic data, 3D
             diffusion-weighted Magnetic Resonance Imaging (DW-MRI) data
             of a hardware phantom containing synthetic fibers, and 3D
             real HARDI brain data. Experiments show that our model is
             effective for denoising HARDI-type data while preserving
             important aspects of the fiber pathways such as fractional
             anisotropy and the orientation distribution
             functions.},
   Doi = {10.1109/isbi.2012.6235602},
   Key = {fds265102}
}

@article{fds265110,
   Author = {Cetingül, HE and Nadar, M and Thompson, P and Sapiro,
             G and Lenglet, C},
   Title = {Simultaneous ODF estimation and tractography in
             HARDI.},
   Journal = {Annual International Conference of the IEEE Engineering in
             Medicine and Biology Society. IEEE Engineering in Medicine
             and Biology Society. Annual International
             Conference},
   Volume = {2012},
   Pages = {86-89},
   Year = {2012},
   Month = {January},
   ISSN = {1557-170X},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/23365838},
   Abstract = {We consider the problem of tracking white matter fibers in
             high angular resolution diffusion imaging (HARDI) data while
             simultaneously estimating the local fiber orientation
             profile. Prior work showed that an unscented Kalman filter
             (UKF) can be used for this problem, yet existing algorithms
             employ parametric mixture models to represent water
             diffusion and to define the state space. To address this
             restrictive model dependency, we propose to extend the UKF
             to HARDI data modeled by orientation distribution functions
             (ODFs), a more generic diffusion model. We consider the
             spherical harmonic representation of the HARDI signal as the
             state, enforce nonnegativity of the ODFs, and perform
             tractography using the directions at which the ODFs attain
             their peaks. In simulations, our method outperforms filtered
             two-tensor tractography at different levels of noise by
             achieving a reduction in mean Chamfer error of 0.05 to 0.27
voxels; it also produces in vivo fiber tracking that is
             consistent with the neuroanatomy.},
   Doi = {10.1109/embc.2012.6345877},
   Key = {fds265110}
}

@article{fds265111,
   Author = {Lenglet, C and Abosch, A and Yacoub, E and De Martino and F and Sapiro, G and Harel, N},
   Title = {Comprehensive in vivo mapping of the human basal ganglia and
             thalamic connectome in individuals using 7T
             MRI.},
   Journal = {PloS one},
   Volume = {7},
   Number = {1},
   Pages = {e29153},
   Year = {2012},
   Month = {January},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22235267},
   Abstract = {Basal ganglia circuits are affected in neurological
             disorders such as Parkinson's disease (PD), essential
             tremor, dystonia and Tourette syndrome. Understanding the
             structural and functional connectivity of these circuits is
             critical for elucidating the mechanisms of the movement and
             neuropsychiatric disorders, and is vital for developing new
             therapeutic strategies such as deep brain stimulation (DBS).
             Knowledge about the connectivity of the human basal ganglia
             and thalamus has rapidly evolved over recent years through
             non-invasive imaging techniques, but has remained incomplete
             because of insufficient resolution and sensitivity of these
             techniques. Here, we present an imaging and computational
             protocol designed to generate a comprehensive in vivo and
             subject-specific, three-dimensional model of the structure
             and connections of the human basal ganglia. High-resolution
             structural and functional magnetic resonance images were
             acquired with a 7-Tesla magnet. Capitalizing on the enhanced
             signal-to-noise ratio (SNR) and enriched contrast obtained
             at high-field MRI, detailed structural and connectivity
             representations of the human basal ganglia and thalamus were
             achieved. This unique combination of multiple imaging
             modalities enabled the in-vivo visualization of the
             individual human basal ganglia and thalamic nuclei, the
             reconstruction of seven white-matter pathways and their
             connectivity probability that, to date, have only been
             reported in animal studies, histologically, or
             group-averaged MRI population studies. Also described are
             subject-specific parcellations of the basal ganglia and
             thalamus into sub-territories based on their distinct
             connectivity patterns. These anatomical connectivity
             findings are supported by functional connectivity data
             derived from resting-state functional MRI (R-fMRI). This
             work demonstrates new capabilities for studying basal
             ganglia circuitry, and opens new avenues of investigation
             into the movement and neuropsychiatric disorders, in
             individual human subjects.},
   Doi = {10.1371/journal.pone.0029153},
   Key = {fds265111}
}

@article{fds265118,
   Author = {Duchin, Y and Abosch, A and Yacoub, E and Sapiro, G and Harel,
             N},
   Title = {Feasibility of using ultra-high field (7 T) MRI for clinical
             surgical targeting.},
   Journal = {PloS one},
   Volume = {7},
   Number = {5},
   Pages = {e37328},
   Year = {2012},
   Month = {January},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22615980},
   Abstract = {The advantages of ultra-high magnetic field (7 Tesla) MRI
             for basic science research and neuroscience applications
             have proven invaluable. Structural and functional MR images
             of the human brain acquired at 7 T exhibit rich information
             content with potential utility for clinical applications.
             However, (1) substantial increases in susceptibility
             artifacts, and (2) geometrical distortions at 7 T would be
             detrimental for stereotactic surgeries such as deep brain
             stimulation (DBS), which typically use 1.5 T images for
             surgical planning. Here, we explore whether these issues can
             be addressed, making feasible the use of 7 T MRI to guide
             surgical planning. Twelve patients with Parkinson's disease,
             candidates for DBS, were scanned on a standard clinical 1.5
             T MRI and a 7 T MRI scanner. Qualitative and quantitative
             assessments of global and regional distortion were evaluated
             based on anatomical landmarks and transformation matrix
             values. Our analyses show that distances between identical
             landmarks on 1.5 T vs. 7 T, in the mid-brain region, were
             less than one voxel, indicating a successful co-registration
             between the 1.5 T and 7 T images under these specific
             imaging parameter sets. On regional analysis, the central
             part of the brain showed minimal distortion, while inferior
             and frontal areas exhibited larger distortion due to
             proximity to air-filled cavities. We conclude that 7 T MR
             images of the central brain regions have comparable
             distortions to that observed on a 1.5 T MRI, and that
             clinical applications targeting structures such as the STN,
             are feasible with information-rich 7 T imaging.},
   Doi = {10.1371/journal.pone.0037328},
   Key = {fds265118}
}
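
(Illustrative sketch.) The paper's quantitative criterion, distances between corresponding anatomical landmarks on the co-registered 1.5 T and 7 T images compared against the voxel size, amounts to a few lines; all coordinates and the voxel size below are hypothetical.

import numpy as np

# Hypothetical landmark coordinates (mm) identified on both scans after
# co-registration; in the study these were mid-brain anatomical landmarks.
landmarks_15T = np.array([[10.2, -4.1, 7.7], [22.5, 3.0, -1.2]])
landmarks_7T  = np.array([[10.6, -3.9, 7.5], [22.1, 3.3, -1.0]])
voxel_mm = 1.0  # assumed isotropic voxel size

dist_mm = np.linalg.norm(landmarks_15T - landmarks_7T, axis=1)
print(dist_mm / voxel_mm)          # displacement in voxel units
print(np.all(dist_mm < voxel_mm))  # the paper's "less than one voxel" criterion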

@article{fds265133,
   Author = {Zhou, M and Chen, H and Paisley, J and Ren, L and Li, L and Xing, Z and Dunson, D and Sapiro, G and Carin, L},
   Title = {Nonparametric Bayesian dictionary learning for analysis of
             noisy and incomplete images.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {21},
   Number = {1},
   Pages = {130-144},
   Year = {2012},
   Month = {January},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/21693421},
   Abstract = {Nonparametric Bayesian methods are considered for recovery
             of imagery based upon compressive, incomplete, and/or noisy
             measurements. A truncated beta-Bernoulli process is employed
             to infer an appropriate dictionary for the data under test
             and also for image recovery. In the context of compressive
             sensing, significant improvements in image recovery are
             manifested using learned dictionaries, relative to using
             standard orthonormal image expansions. The
             compressive-measurement projections are also optimized for
             the learned dictionary. Additionally, we consider simpler
             (incomplete) measurements, defined by measuring a subset of
             image pixels, uniformly selected at random. Spatial
             interrelationships within imagery are exploited through use
             of the Dirichlet and probit stick-breaking processes.
             Several example results are presented, with comparisons to
             other methods in the literature.},
   Doi = {10.1109/tip.2011.2160072},
   Key = {fds265133}
}
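
(Illustrative sketch.) The prior named in the abstract, a truncated beta-Bernoulli process, can be sampled in a few lines, which shows why it yields sparse dictionary usage; all parameters and sizes below are ours, not from the paper.

import numpy as np

def truncated_beta_bernoulli(n_signals, K, a=1.0, b=1.0, seed=0):
    """Draw binary dictionary-usage indicators z[i, k] from a K-truncated
    beta-Bernoulli process: pi_k ~ Beta(a/K, b(K-1)/K), z_ik ~ Bernoulli(pi_k).
    As K grows, each signal uses only a few atoms, which is what lets the
    model infer an appropriate dictionary size from the data."""
    rng = np.random.default_rng(seed)
    pi = rng.beta(a / K, b * (K - 1) / K, size=K)
    return rng.random((n_signals, K)) < pi

Z = truncated_beta_bernoulli(n_signals=100, K=256)
print(Z.sum(axis=1).mean())  # average active atoms per signal (small, ~a/b)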

@article{fds264826,
   Author = {Sapiro, G},
   Title = {ON THE STATE: Classes at the Collège de France,
             1989-1992},
   Journal = {QUINZAINE LITTERAIRE},
   Number = {1061},
   Pages = {4-4},
   Year = {2012},
   ISSN = {0048-6493},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000305499400001&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264826}
}

@article{fds264840,
   Author = {Bronstein, AM and Sprechmann, P and Sapiro, G},
   Title = {Learning Efficient Structured Sparse Models},
   Journal = {CoRR},
   Volume = {abs/1206.4649},
   Year = {2012},
   Key = {fds264840}
}

@article{fds264855,
   Author = {Ramírez, I and Sapiro, G},
   Title = {Low-rank data modeling via the minimum description length
             principle.},
   Journal = {ICASSP},
   Pages = {2165-2168},
   Publisher = {IEEE},
   Year = {2012},
   ISBN = {978-1-4673-0046-9},
   url = {http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=6268628},
   Doi = {10.1109/ICASSP.2012.6288341},
   Key = {fds264855}
}

@article{fds265122,
   Author = {Tran, EEH and Borgnia, MJ and Kuybeda, O and Schauder, DM and Bartesaghi, A and Frank, GA and Sapiro, G and Milne, JLS and Subramaniam, S},
   Title = {Structural mechanism of trimeric HIV-1 envelope glycoprotein
             activation.},
   Journal = {PLoS Pathog},
   Volume = {8},
   Number = {7},
   Pages = {e1002797},
   Year = {2012},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22807678},
   Abstract = {HIV-1 infection begins with the binding of trimeric viral
             envelope glycoproteins (Env) to CD4 and a co-receptor on
             target T-cells. Understanding how these ligands influence
             the structure of Env is of fundamental interest for HIV
             vaccine development. Using cryo-electron microscopy, we
             describe the contrasting structural outcomes of trimeric Env
             binding to soluble CD4, to the broadly neutralizing,
             CD4-binding site antibodies VRC01, VRC03 and b12, or to the
             monoclonal antibody 17b, a co-receptor mimic. Binding of
             trimeric HIV-1 BaL Env to either soluble CD4 or 17b alone,
             is sufficient to trigger formation of the open quaternary
             conformation of Env. In contrast, VRC01 locks Env in the
             closed state, while b12 binding requires a partial opening
             in the quaternary structure of trimeric Env. Our results
             show that, despite general similarities in regions of the
             HIV-1 gp120 polypeptide that contact CD4, VRC01, VRC03 and
             b12, there are important differences in quaternary
             structures of the complexes these ligands form on native
             trimeric Env, and potentially explain differences in the
             neutralizing breadth and potency of antibodies with similar
             specificities. From cryo-electron microscopic analysis at
             ∼9 Å resolution of a cleaved, soluble version of trimeric
             Env, we show that a structural signature of the open Env
             conformation is a three-helix motif composed of α-helical
             segments derived from highly conserved, non-glycosylated
             N-terminal regions of the gp41 trimer. The three N-terminal
             gp41 helices in this novel, activated Env conformation are
             held apart by their interactions with the rest of Env, and
             are less compactly packed than in the post-fusion, six-helix
             bundle state. These findings suggest a new structural
             template for designing immunogens that can elicit antibodies
             targeting HIV at a vulnerable, pre-entry
             stage.},
   Doi = {10.1371/journal.ppat.1002797},
   Key = {fds265122}
}

@article{fds304072,
   Author = {Duarte-Carvajalino, J and Sapiro, G and Yu, G and Carin,
             L},
   Title = {Online Adaptive Statistical Compressed Sensing of Gaussian
             Mixture Models},
   Volume = {abs/1112.5895},
   Year = {2011},
   Month = {December},
   url = {http://arxiv.org/abs/1112.5895v1},
   Abstract = {A framework of online adaptive statistical compressed
             sensing is introduced for signals following a mixture model.
             The scheme first uses non-adaptive measurements, from which
              an online decoding scheme estimates the model selection. As
              soon as a candidate model has been selected, an optimal
              sensing scheme for that model is applied from then on.
             The final signal reconstruction is calculated from the
             ensemble of both the non-adaptive and the adaptive
             measurements. For signals generated from a Gaussian mixture
             model, the online adaptive sensing algorithm is given and
             its performance is analyzed. On both synthetic and real
             image data, the proposed adaptive scheme considerably
             reduces the average reconstruction error with respect to
             standard statistical compressed sensing that uses fully
             random measurements, at a marginally increased computational
             complexity.},
   Key = {fds304072}
}

@article{fds265090,
   Author = {Bar, L and Sapiro, G},
   Title = {Hierarchical invariant sparse modeling for image
             analysis},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Pages = {2397-2400},
   Publisher = {IEEE},
   Year = {2011},
   Month = {December},
   ISSN = {1522-4880},
   url = {http://dx.doi.org/10.1109/ICIP.2011.6116125},
   Abstract = {Sparse representation theory has been increasingly used in
             signal processing and machine learning. In this paper we
             introduce a hierarchical sparse modeling approach which
             integrates information from the image patch level to derive
             a mid-level invariant image and pattern representation. The
             proposed framework is based on a hierarchical architecture
             of dictionary learning for sparse coding in a cortical
             (log-polar) space, combined with a novel pooling operator
             which incorporates the Rapid transform and max pooling to
              attain rotation and scale invariance. The invariant sparse
              representation of patterns presented here can be used in
              different object recognition tasks. Promising results are
              obtained for three applications: 2D shape classification,
              texture recognition, and object detection. © 2011
              IEEE.},
   Doi = {10.1109/ICIP.2011.6116125},
   Key = {fds265090}
}

@article{fds265098,
   Author = {Yu, G and Sapiro, G},
   Title = {Statistical compressed sensing of Gaussian mixture
             models},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {59},
   Number = {12},
   Pages = {5842-5858},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2011},
   Month = {December},
   ISSN = {1053-587X},
   url = {http://dx.doi.org/10.1109/TSP.2011.2168521},
   Abstract = {A novel framework of compressed sensing, namely statistical
             compressed sensing (SCS), that aims at efficiently sampling
             a collection of signals that follow a statistical
             distribution, and achieving accurate reconstruction on
             average, is introduced. SCS based on Gaussian models is
             investigated in depth. For signals that follow a single
             Gaussian model, with Gaussian or Bernoulli sensing matrices
             of ${\cal O}(k)$ measurements, considerably smaller than the
             ${\cal O}(k \log(N/k))$ required by conventional CS based on
             sparse models, where $N$ is the signal dimension, and with
             an optimal decoder implemented via linear filtering,
             significantly faster than the pursuit decoders applied in
              conventional CS, the error of SCS is shown to be tightly upper
             bounded by a constant times the best $k$-term approximation
             error, with overwhelming probability. The failure
             probability is also significantly smaller than that of
             conventional sparsity-oriented CS. Stronger yet simpler
             results further show that for any sensing matrix, the error
             of Gaussian SCS is upper bounded by a constant times the
             best $k$-term approximation with probability one, and the
             bound constant can be efficiently calculated. For Gaussian
             mixture models (GMMs), that assume multiple Gaussian
             distributions and that each signal follows one of them with
             an unknown index, a piecewise linear estimator is introduced
             to decode SCS. The accuracy of model selection, at the heart
             of the piecewise linear decoder, is analyzed in terms of the
             properties of the Gaussian distributions and the number of
             sensing measurements. A maximization-maximization (Max-Max)
             algorithm that iteratively estimates the Gaussian models
             parameters, the signals model selection, and decodes the
             signals, is presented for GMM-based SCS. In real image
             sensing applications, GMM-based SCS is shown to lead to
             improved results compared to conventional CS, at a
             considerably lower computational cost. © 2011
             IEEE.},
   Doi = {10.1109/TSP.2011.2168521},
   Key = {fds265098}
}
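
(Illustrative sketch.) Two ingredients named in the abstract are a linear-filtering decoder for a single Gaussian and a piecewise linear decoder that selects among GMM components. The toy below implements both on synthetic data; model selection here uses the measurement-domain residual as a proxy for the likelihood-based criterion analyzed in the paper, and all sizes are ours.

import numpy as np

rng = np.random.default_rng(0)
N, M = 64, 16                      # signal dimension, number of measurements

def decode_gaussian(y, Phi, mu, Sigma, sigma2=1e-4):
    """Linear MMSE (Wiener) decoder for y = Phi x + noise, x ~ N(mu, Sigma)."""
    S = Phi @ Sigma @ Phi.T + sigma2 * np.eye(len(y))
    return mu + Sigma @ Phi.T @ np.linalg.solve(S, y - Phi @ mu)

# Toy 2-component GMM prior; each component concentrates near a ~4-dim subspace.
mus = [np.zeros(N), np.ones(N)]
Sigmas = []
for _ in range(2):
    A = rng.normal(size=(N, 4))
    Sigmas.append(A @ A.T + 1e-3 * np.eye(N))

x = rng.multivariate_normal(mus[1], Sigmas[1])   # signal drawn from component 1
Phi = rng.normal(size=(M, N)) / np.sqrt(M)       # Gaussian sensing matrix
y = Phi @ x

# Piecewise linear decoder: decode under each component, keep the best fit.
best = min(range(2), key=lambda k: np.linalg.norm(
    y - Phi @ decode_gaussian(y, Phi, mus[k], Sigmas[k])))
x_hat = decode_gaussian(y, Phi, mus[best], Sigmas[best])
print(best, np.linalg.norm(x - x_hat) / np.linalg.norm(x))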

@article{fds265130,
   Author = {Zhou, M and Carin, L and Yang, H and Dunson, D and Sapiro,
             G},
   Title = {Dependent hierarchical beta process for image interpolation
             and denoising},
   Journal = {Journal of Machine Learning Research},
   Volume = {15},
   Pages = {883-891},
   Year = {2011},
   Month = {December},
   ISSN = {1532-4435},
   Abstract = {A dependent hierarchical beta process (dHBP) is developed as
             a prior for data that may be represented in terms of a
             sparse set of latent features, with covariate-dependent
             feature usage. The dHBP is applicable to general covariates
             and data models, imposing that signals with similar
             covariates are likely to be manifested in terms of similar
             features. Coupling the dHBP with the Bernoulli process, and
             upon marginalizing out the dHBP, the model may be
             interpreted as a covariate-dependent hierarchical Indian
             buffet process. As applications, we consider interpolation
             and denoising of an image, with covariates defined by the
             location of image patches within an image. Two types of
             noise models are considered: (i) typical white Gaussian
             noise; and (ii) spiky noise of arbitrary amplitude,
             distributed uniformly at random. In these examples, the
             features correspond to the atoms of a dictionary, learned
             based upon the data under test (without a priori training
             data). State-of-the-art performance is demonstrated, with
             efficient inference using hybrid Gibbs, Metropolis-Hastings
             and slice sampling. Copyright 2011 by the
             authors.},
   Key = {fds265130}
}

@article{fds265084,
   Author = {Jin, Y and Shi, Y and Jahanshad, N and Aganj, I and Sapiro, G and Toga, AW and Thompson, PM},
   Title = {3D elastic registration improves HARDI-derived fiber
             alignment and automated tract clustering},
   Journal = {Proceedings - International Symposium on Biomedical
             Imaging},
   Pages = {822-826},
   Publisher = {IEEE},
   Year = {2011},
   Month = {November},
   ISSN = {1945-7928},
   url = {http://dx.doi.org/10.1109/ISBI.2011.5872531},
   Abstract = {High angular resolution diffusion imaging (HARDI) allows
             population studies of fiber integrity and connectivity.
             Tractography can extract individual fibers. For group
             studies, fibers must be clustered into recognizable bundles
             found consistently across subjects. Nonlinear image
             registration may improve population clustering. To test
             this, we performed whole-brain tractography with an
             orientation distribution function based Hough transform
             method in 20 young adults scanned with 4 Tesla, 105-gradient
             HARDI. We warped all extracted fibers to a
             geometrically-centered template using a 3D elastic
             registration driven by fractional anisotropy maps, to align
             embedded tracts. Fiber alignment was evaluated by
             calculating distances among corresponding fibers across
             subjects. Before and after warping, we performed spectral
             clustering of the fibers using a k-means method, based on
             eigenvectors of a fiber similarity matrix. In tests with an
             overlap metric, non-rigid fiber warping yielded more robust
             clustering results. Non-rigid warping is therefore
             advantageous for population studies using multi-subject
             tract clustering. © 2011 IEEE.},
   Doi = {10.1109/ISBI.2011.5872531},
   Key = {fds265084}
}
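
(Illustrative sketch.) The clustering step, spectral clustering of fibers via k-means on eigenvectors of a fiber similarity matrix, is shown below on two synthetic straight-line bundles. The mean-closest-point fiber distance, the Gaussian affinity width, and the tiny k-means are our choices, not the paper's exact pipeline.

import numpy as np

def fiber_distance(f1, f2):
    """Symmetric mean closest-point distance between two polylines (n, 3)."""
    d = np.linalg.norm(f1[:, None, :] - f2[None, :, :], axis=2)
    return 0.5 * (d.min(axis=1).mean() + d.min(axis=0).mean())

def spectral_cluster(fibers, k, sigma=2.0, iters=20, seed=0):
    """Cluster fibers via eigenvectors of a Gaussian affinity matrix,
    followed by a small k-means in the spectral embedding."""
    n = len(fibers)
    D = np.array([[fiber_distance(a, b) for b in fibers] for a in fibers])
    W = np.exp(-(D / sigma) ** 2)
    d = W.sum(axis=1)
    L = W / np.sqrt(np.outer(d, d))          # normalized affinity
    _, vecs = np.linalg.eigh(L)
    X = vecs[:, -k:]                         # top-k eigenvectors as features
    X = X / np.linalg.norm(X, axis=1, keepdims=True)
    rng = np.random.default_rng(seed)
    C = X[rng.choice(n, k, replace=False)]
    for _ in range(iters):                   # plain k-means
        labels = np.argmin(((X[:, None] - C[None]) ** 2).sum(-1), axis=1)
        C = np.array([X[labels == j].mean(axis=0) if np.any(labels == j)
                      else C[j] for j in range(k)])
    return labels

# Two synthetic bundles: straight lines offset in y.
t = np.linspace(0, 10, 30)
bundle = lambda y0: [np.c_[t, np.full_like(t, y0 + 0.1 * i), np.zeros_like(t)]
                     for i in range(5)]
fibers = bundle(0.0) + bundle(5.0)
print(spectral_cluster(fibers, k=2))  # the two bundles get distinct labels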

@article{fds265085,
   Author = {Caruyer, E and Aganj, I and Lenglet, C and Sapiro, G and Deriche,
             R},
   Title = {Online motion detection in high angular resolution diffusion
             imaging},
   Journal = {Proceedings - International Symposium on Biomedical
             Imaging},
   Pages = {516-519},
   Publisher = {IEEE},
   Year = {2011},
   Month = {November},
   ISSN = {1945-7928},
   url = {http://dx.doi.org/10.1109/ISBI.2011.5872458},
   Abstract = {The orientation distribution function (ODF) can be
             reconstructed online incrementally from diffusion-weighted
             MRI with a Kalman filtering framework. This online
             reconstruction can provide real-time feedback to the
             practitioner, especially appreciated for long acquisition
             protocols typical in Q-ball imaging. On top of the Kalman
             filter, we propose a method to evaluate online the
             reconstruction accuracy of the estimated ODF in constant
             solid angle. In addition, monitoring the residuals of the
             Kalman filter, we design, based on statistical tests, two
             algorithms for online detection of subject motion. The
             proposed techniques, tested on real and synthetic data under
              various experimental conditions, can detect rotations by
              angles of less than 3°. © 2011 IEEE.},
   Doi = {10.1109/ISBI.2011.5872458},
   Key = {fds265085}
}
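
(Illustrative sketch.) The motion-detection idea, monitoring the residuals (innovations) of the Kalman filter with a statistical test, can be shown with a plain linear Kalman filter on synthetic data; the state stands in for ODF spherical-harmonic coefficients, and a simple 3-sigma residual test replaces the paper's specific tests.

import numpy as np

rng = np.random.default_rng(1)
n = 15                                  # state size (~SH coefficients)
x_true = rng.normal(size=n)             # underlying coefficients
x = np.zeros(n)                         # state estimate
P = np.eye(n) * 10.0                    # state covariance
R = 0.01                                # measurement noise variance

flags = []
for i in range(200):
    h = rng.normal(size=n) / np.sqrt(n)      # one diffusion measurement row
    if i == 120:                             # simulate subject motion:
        x_true = x_true + 0.8                # the true signal jumps
    y = h @ x_true + np.sqrt(R) * rng.normal()
    S = h @ P @ h + R                        # innovation variance
    r = y - h @ x                            # Kalman residual (innovation)
    flags.append(r * r / S > 9.0)            # ~3-sigma test on the residual
    K = P @ h / S                            # Kalman gain
    x = x + K * r
    P = P - np.outer(K, h) @ P

print(np.nonzero(flags)[0][:5])  # detections should cluster just after step 120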

@article{fds265086,
   Author = {Jahanshad, N and Aganj, I and Lenglet, C and Joshi, A and Jin, Y and Barysheva, M and McMahon, KL and De Zubicaray, GI and Martin, NG and Wright, MJ and Toga, AW and Sapiro, G and Thompson,
             PM},
   Title = {Sex differences in the human connectome: 4-Tesla high
             angular resolution diffusion imaging (HARDI) tractography in
             234 young adult twins},
   Journal = {Proceedings - International Symposium on Biomedical
             Imaging},
   Pages = {939-943},
   Publisher = {IEEE},
   Year = {2011},
   Month = {November},
   ISSN = {1945-7928},
   url = {http://dx.doi.org/10.1109/ISBI.2011.5872558},
   Abstract = {Cortical connectivity is associated with cognitive and
             behavioral traits that are thought to vary between sexes.
             Using high-angular resolution diffusion imaging at 4 Tesla,
             we scanned 234 young adult twins and siblings (mean age:
              23.4 ± 2.0 SD years) with 94 diffusion-encoding directions. We
             applied a novel Hough transform method to extract fiber
             tracts throughout the entire brain, based on fields of
             constant solid angle orientation distribution functions
             (ODFs). Cortical surfaces were generated from each subject's
             3D T1-weighted structural MRI scan, and tracts were aligned
             to the anatomy. Network analysis revealed the proportions of
             fibers interconnecting 5 key subregions of the frontal
             cortex, including connections between hemispheres. We found
             significant sex differences (147 women/87 men) in the
             proportions of fibers connecting contralateral superior
             frontal cortices. Interhemispheric connectivity was greater
             in women, in line with long-standing theories of hemispheric
             specialization. These findings may be relevant for ongoing
             studies of the human connectome. © 2011
             IEEE.},
   Doi = {10.1109/ISBI.2011.5872558},
   Key = {fds265086}
}

@article{fds265087,
   Author = {Zhan, L and Leow, AD and Aganj, I and Lenglet, C and Sapiro, G and Yacoub,
             E and Harel, N and Toga, AW and Thompson, PM},
   Title = {Differential information content in staggered multiple shell
             hardi measured by the tensor distribution
             function},
   Journal = {Proceedings - International Symposium on Biomedical
             Imaging},
   Pages = {305-309},
   Publisher = {IEEE},
   Year = {2011},
   Month = {November},
   ISSN = {1945-7928},
   url = {http://dx.doi.org/10.1109/ISBI.2011.5872411},
   Abstract = {Diffusion tensor imaging has accelerated the study of brain
             connectivity, but single-tensor diffusion models are too
             simplistic to model fiber crossing and mixing. Hybrid
             diffusion imaging (HYDI) samples the radial and angular
             structure of local diffusion on multiple spherical shells in
             q-space, combining the high SNR and CNR achievable at low
             and high b-values, respectively. We acquired and analyzed
             human multi-shell HARDI at ultra-high field-strength (7
             Tesla; b=1000, 2000, 3000 s/mm2). In experiments with the
             tensor distribution function (TDF), the b-value affected the
             intrinsic uncertainty for estimating component fiber
             orientations and their diffusion eigenvalues. We computed
             orientation density functions by least-squares fitting in
             multiple HARDI shells simultaneously. Within the range
             examined, higher b-values gave improved orientation
             estimates but poorer eigenvalue estimates; lower b-values
             showed opposite strengths and weaknesses. Combining these
             strengths, multiple-shell HARDI, especially with staggered
             angular sampling, outperformed single-shell scanning
             protocols, even when overall scanning time was held
             constant. © 2011 IEEE.},
   Doi = {10.1109/ISBI.2011.5872411},
   Key = {fds265087}
}

@article{fds265097,
   Author = {Sapiro, G},
   Title = {Comparing shapes, understanding evolution.},
   Journal = {Proceedings of the National Academy of Sciences of the
             United States of America},
   Volume = {108},
   Number = {45},
   Pages = {18189-18190},
   Year = {2011},
   Month = {November},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22042848},
   Doi = {10.1073/pnas.1114928108},
   Key = {fds265097}
}

@article{fds265132,
   Author = {Castrodad, A and Xing, Z and Greer, JB and Bosch, E and Carin, L and Sapiro, G},
   Title = {Learning discriminative sparse representations for modeling,
             source separation, and mapping of hyperspectral
             imagery},
   Journal = {IEEE Transactions on Geoscience and Remote
             Sensing},
   Volume = {49},
   Number = {11 PART 1},
   Pages = {4263-4281},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2011},
   Month = {November},
   ISSN = {0196-2892},
   url = {http://dx.doi.org/10.1109/TGRS.2011.2163822},
   Abstract = {A method is presented for subpixel modeling, mapping, and
             classification in hyperspectral imagery using learned
             block-structured discriminative dictionaries, where each
             block is adapted and optimized to represent a material in a
             compact and sparse manner. The spectral pixels are modeled
             by linear combinations of subspaces defined by the learned
             dictionary atoms, allowing for linear mixture analysis. This
             model provides flexibility in source representation and
             selection, thus accounting for spectral variability,
             small-magnitude errors, and noise. A spatial-spectral
             coherence regularizer in the optimization allows pixel
             classification to be influenced by similar neighbors. We
              extend the proposed approach to cases in which there is no
              knowledge of the materials in the scene (unsupervised
              classification), and provide experiments and comparisons with
             simulated and real data. We also present results when the
             data have been significantly undersampled and then
             reconstructed, still retaining high-performance
             classification, showing the potential role of compressive
             sensing and sparse modeling techniques in efficient
             acquisition/transmission missions for hyperspectral imagery.
             © 2006 IEEE.},
   Doi = {10.1109/TGRS.2011.2163822},
   Key = {fds265132}
}

@article{fds265128,
   Author = {Chen, B and Polatkan, G and Sapiro, G and Dunson, DB and Carin,
             L},
   Title = {The hierarchical beta process for convolutional factor
             analysis and deep learning},
   Journal = {Proceedings of the 28th International Conference on Machine
             Learning, ICML 2011},
   Pages = {361-368},
   Year = {2011},
   Month = {October},
   Abstract = {A convolutional factor-analysis model is developed, with the
             number of filters (factors) inferred via the beta process
             (BP) and hierarchical BP, for single-task and multi-task
             learning, respectively. The computation of the model
             parameters is implemented within a Bayesian setting,
             employing Gibbs sampling; we explicitly exploit the
             convolutional nature of the expansion to accelerate
             computations. The model is used in a multi-level ("deep")
             analysis of general data, with specific results presented
             for image-processing data sets, e.g., classification.
             Copyright 2011 by the author(s)/owner(s).},
   Key = {fds265128}
}

@article{fds265129,
   Author = {Li, L and Zhou, M and Sapiro, G and Carin, L},
   Title = {On the integration of topic modeling and dictionary
             learning},
   Journal = {Proceedings of the 28th International Conference on Machine
             Learning, ICML 2011},
   Pages = {625-632},
   Year = {2011},
   Month = {October},
   Abstract = {A new nonparametric Bayesian model is developed to integrate
              dictionary learning and topic modeling into a unified
             framework. The model is employed to analyze partially
             annotated images, with the dictionary learning performed
             directly on image patches. Efficient inference is performed
             with a Gibbs-slice sampler, and encouraging results are
             reported on widely used datasets. Copyright 2011 by the
             author(s)/owner(s).},
   Key = {fds265129}
}

@article{fds265094,
   Author = {Sprechmann, P and Ramírez, I and Sapiro, G and Eldar,
             YC},
   Title = {C-HiLasso: A collaborative hierarchical sparse modeling
             framework},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {59},
   Number = {9},
   Pages = {4183-4198},
   Year = {2011},
   Month = {September},
   ISSN = {1053-587X},
   url = {http://dx.doi.org/10.1109/TSP.2011.2157912},
   Abstract = {Sparse modeling is a powerful framework for data analysis
             and processing. Traditionally, encoding in this framework is
             performed by solving an ℓ1-regularized linear regression
             problem, commonly referred to as Lasso or Basis Pursuit. In
             this work we combine the sparsity-inducing property of the
             Lasso at the individual feature level, with the
             block-sparsity property of the Group Lasso, where sparse
             groups of features are jointly encoded, obtaining a sparsity
             pattern hierarchically structured. This results in the
             Hierarchical Lasso (HiLasso), which shows important
             practical advantages. We then extend this approach to the
             collaborative case, where a set of simultaneously coded
             signals share the same sparsity pattern at the higher
             (group) level, but not necessarily at the lower (inside the
             group) level, obtaining the collaborative HiLasso model
             (C-HiLasso). Such signals then share the same active groups,
             or classes, but not necessarily the same active set. This
             model is very well suited for applications such as source
             identification and separation. An efficient optimization
             procedure, which guarantees convergence to the global
             optimum, is developed for these new models. The underlying
             presentation of the framework and optimization approach is
             complemented by experimental examples and theoretical
             results regarding recovery guarantees. © 2011
             IEEE.},
   Doi = {10.1109/TSP.2011.2157912},
   Key = {fds265094}
}
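
(Illustrative sketch.) The HiLasso penalty combines the Lasso's l1 term with the Group Lasso's sum of group l2 norms; its proximal operator is elementwise soft-thresholding followed by group shrinkage, which gives a simple ISTA solver. The single-signal toy below omits the collaborative coupling across signals, and all sizes and regularization weights are ours.

import numpy as np

def soft(v, t):
    return np.sign(v) * np.maximum(np.abs(v) - t, 0.0)

def hilasso_ista(y, D, groups, lam1, lam2, n_iter=500):
    """ISTA for min_a 0.5||y - D a||^2 + lam1*||a||_1 + lam2*sum_g ||a_g||_2.
    The prox of the combined penalty is elementwise soft-thresholding
    followed by group-wise shrinkage of each block."""
    a = np.zeros(D.shape[1])
    L = np.linalg.norm(D, 2) ** 2          # Lipschitz constant of the gradient
    for _ in range(n_iter):
        g = D.T @ (D @ a - y)
        v = soft(a - g / L, lam1 / L)      # elementwise l1 prox
        for idx in groups:                 # group l2 prox (block shrinkage)
            nrm = np.linalg.norm(v[idx])
            v[idx] = 0.0 if nrm <= lam2 / L else v[idx] * (1 - lam2 / (L * nrm))
        a = v
    return a

rng = np.random.default_rng(0)
D = rng.normal(size=(30, 40)); D /= np.linalg.norm(D, axis=0)
groups = [np.arange(i, i + 10) for i in range(0, 40, 10)]  # 4 blocks ("classes")
a0 = np.zeros(40); a0[[2, 5, 7]] = [1.5, -2.0, 1.0]        # one active group
y = D @ a0 + 0.01 * rng.normal(size=30)
a = hilasso_ista(y, D, groups, lam1=0.05, lam2=0.2)
print([i for i, g in enumerate(groups) if np.linalg.norm(a[g]) > 1e-6])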

@article{fds265096,
   Author = {Yatziv, L and Ibarz, J and Strobel, N and Datta, S and Sapiro,
             G},
   Title = {Esophagus silhouette extraction and reconstruction from
             fluoroscopic views for cardiac ablation procedure
             guidance.},
   Journal = {IEEE transactions on information technology in biomedicine :
             a publication of the IEEE Engineering in Medicine and
             Biology Society},
   Volume = {15},
   Number = {5},
   Pages = {703-708},
   Year = {2011},
   Month = {September},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/21775266},
   Abstract = {Cardiac ablation involves the risk of serious complications
             when thermal injury to the esophagus occurs. This paper
             proposes to reduce the risk of such injuries by a proactive
             visualization technique, improving physician awareness of
             the esophagus location in the absence of or in addition to a
             reactive monitoring device such as a thermal probe. This is
             achieved by combining a graphical representation of the
             esophagus with live fluoroscopy. Toward this goal, we
             present an automated method to reconstruct and visualize a
             3-D esophagus model from fluoroscopy image sequences
             acquired using different C-arm viewing directions. In order
             to visualize the esophagus under fluoroscopy, it is first
             biomarked by swallowing a contrast agent such as barium.
             Images obtained in this procedure are then used to
             automatically extract the 2-D esophagus silhouette and
             reconstruct a 3-D surface of the esophagus internal wall.
             Once the 3-D representation has been computed, it can be
             visualized using fluoroscopy overlay techniques. Compared to
             3-D esophagus imaging using CT or C-arm CT, our proposed
             fluoroscopy method requires low radiation dose and enables a
             simpler workflow on geometry-calibrated standard C-arm
             systems.},
   Doi = {10.1109/titb.2011.2162247},
   Key = {fds265096}
}

@article{fds265080,
   Author = {Yu, G and Sapiro, G},
   Title = {Statistical compressive sensing of Gaussian mixture
             models},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {3728-3731},
   Year = {2011},
   Month = {August},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2011.5947161},
   Abstract = {A new framework of compressive sensing (CS), namely
             statistical compressive sensing (SCS), that aims at
             efficiently sampling a collection of signals that follow a
             statistical distribution and achieving accurate
             reconstruction on average, is introduced. For signals
             following a Gaussian distribution, with Gaussian or
             Bernoulli sensing matrices of O(k) measurements,
             considerably smaller than the O(k log(N/k)) required by
             conventional CS, where N is the signal dimension, and with
             an optimal decoder implemented with linear filtering,
             significantly faster than the pursuit decoders applied in
              conventional CS, the error of Gaussian SCS is shown to be
             bounded by a constant times the best k-term approximation
             error, with overwhelming probability. The failure
             probability is also significantly smaller than that of
             conventional CS. Stronger yet simpler results further show
             that for any sensing matrix, the error of Gaussian SCS is
             upper bounded by a constant times the best k-term
             approximation with probability one, and the bound constant
             can be efficiently calculated. For signals following
             Gaussian mixture models, SCS with a piecewise linear decoder
             is introduced and shown to produce for real images better
             results than conventional CS based on sparse models. © 2011
             IEEE.},
   Doi = {10.1109/ICASSP.2011.5947161},
   Key = {fds265080}
}

@article{fds265081,
   Author = {Sprechmann, P and Ramirez, I and Cancela, P and Sapiro,
             G},
   Title = {Collaborative sources identification in mixed signals via
             hierarchical sparse modeling},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {5816-5819},
   Publisher = {IEEE},
   Year = {2011},
   Month = {August},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2011.5947683},
   Abstract = {A collaborative framework for detecting the different
             sources in mixed signals is presented in this paper. The
             approach is based on C-HiLasso, a convex collaborative
             hierarchical sparse model, and proceeds as follows. First,
             we build a structured dictionary for mixed signals by
             concatenating a set of sub-dictionaries, each one of them
             learned to sparsely model one of a set of possible classes.
             Then, the coding of the mixed signal is performed by
             efficiently solving a convex optimization problem that
             combines standard sparsity with group and collaborative
             sparsity. The present sources are identified by looking at
             the sub-dictionaries automatically selected in the coding.
             The collaborative filtering in C-HiLasso takes advantage of
             the temporal/spatial redundancy in the mixed signals,
             letting collections of samples collaborate in identifying
             the classes, while allowing individual samples to have
             different internal sparse representations. This
             collaboration is critical to further stabilize the sparse
             representation of signals, in particular the
             class/sub-dictionary selection. The internal sparsity inside
             the sub-dictionaries, as naturally incorporated by the
             hierarchical aspects of C-HiLasso, is critical to make the
             model consistent with the essence of the sub-dictionaries
             that have been trained for sparse representation of each
             individual class. We present applications from speaker and
             instrument identification and texture separation. In the
             case of audio signals, we use sparse modeling to describe
             the short-term power spectrum envelopes of harmonic sounds.
             The proposed pitch independent method automatically detects
             the number of sources on a recording. © 2011
             IEEE.},
   Doi = {10.1109/ICASSP.2011.5947683},
   Key = {fds265081}
}

@article{fds265082,
   Author = {Léger, F and Yu, G and Sapiro, G},
   Title = {Efficient matrix completion with Gaussian
             models},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {1113-1116},
   Publisher = {IEEE},
   Year = {2011},
   Month = {August},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2011.5946603},
   Abstract = {A general framework based on Gaussian models and a MAP-EM
              algorithm is introduced in this paper for solving
              matrix/table completion problems. The numerical experiments
              with the standard and challenging movie ratings data show
              that the proposed approach, based on probably one of the
              simplest probabilistic models, leads to results in the same
              ballpark as the state-of-the-art, at a lower computational
              cost. © 2011 IEEE.},
   Doi = {10.1109/ICASSP.2011.5946603},
   Key = {fds265082}
}
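
(Illustrative sketch.) One way to read "Gaussian models plus a MAP-EM algorithm" for completion: model each column as a Gaussian vector, impute missing entries by their conditional expectation given the observed ones (E-step), and refit the mean and covariance (M-step). The toy below uses a single Gaussian and standard EM on synthetic low-rank data, a simplification of the paper's approach.

import numpy as np

def em_gaussian_complete(Y, mask, n_iter=30, reg=1e-3):
    """EM matrix completion with columns modeled as x ~ N(mu, Sigma).
    E-step: conditional mean (and covariance) of the missing entries given
    the observed ones.  M-step: refit mu and Sigma from completed statistics."""
    d, n = Y.shape
    X = np.where(mask, Y, 0.0)
    mu, Sigma = X.mean(axis=1), np.cov(X) + reg * np.eye(d)
    for _ in range(n_iter):
        S2 = np.zeros((d, d))                        # accumulates E[x x^T]
        for j in range(n):
            o, m = mask[:, j], ~mask[:, j]
            if m.any():
                K = Sigma[np.ix_(m, o)] @ np.linalg.inv(
                    Sigma[np.ix_(o, o)] + reg * np.eye(int(o.sum())))
                X[m, j] = mu[m] + K @ (Y[o, j] - mu[o])
                S2[np.ix_(m, m)] += Sigma[np.ix_(m, m)] - K @ Sigma[np.ix_(o, m)]
            S2 += np.outer(X[:, j], X[:, j])
        mu = X.mean(axis=1)
        Sigma = S2 / n - np.outer(mu, mu) + reg * np.eye(d)
    return X

rng = np.random.default_rng(0)
A = rng.normal(size=(20, 3)) @ rng.normal(size=(3, 100))   # low-rank "ratings"
mask = rng.random(A.shape) < 0.6                           # 60% entries observed
X = em_gaussian_complete(A, mask)
print(np.abs((X - A)[~mask]).mean())                       # error on missing entries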

@article{fds265083,
   Author = {Ramírez, I and Sapiro, G},
   Title = {Sparse coding and dictionary learning based on the MDL
             principle},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {2160-2163},
   Publisher = {IEEE},
   Year = {2011},
   Month = {August},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2011.5946755},
   Abstract = {The power of sparse signal coding with learned overcomplete
             dictionaries has been demonstrated in a variety of
             applications and fields, from signal processing to
             statistical inference and machine learning. However, the
             statistical properties of these models, such as underfitting
             or overfitting given sets of data, are still not well
             characterized in the literature. This work aims at filling
             this gap by means of the Minimum Description Length (MDL)
             principle - a well established information-theoretic
             approach to statistical inference. The resulting framework
             derives a family of efficient sparse coding and modeling
             (dictionary learning) algorithms, which by virtue of the MDL
              principle, are completely parameter free. Furthermore, such
              a framework allows incorporating additional prior information
              in the model, such as Markovian dependencies, in a natural
             way. We demonstrate the performance of the proposed
             framework with results for image denoising and
             classification tasks. © 2011 IEEE.},
   Doi = {10.1109/ICASSP.2011.5946755},
   Key = {fds265083}
}
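
(Illustrative sketch.) The MDL idea is to select the model, here the sparsity level, that minimizes total codelength: bits for the residual plus bits for the support and quantized coefficients. The two-part criterion below is schematic, not the paper's exact codelength, and the greedy atom selection is ordinary OMP.

import numpy as np

def mdl_sparse_code(y, D):
    """Pick the sparsity level by a schematic two-part MDL criterion:
    L(k) = bits for the residual under a Gaussian code
         + bits to index the support + bits to quantize k coefficients."""
    n, p = D.shape
    best, support, r = (np.inf, []), [], y.copy()
    for k in range(1, min(n, p)):
        support.append(int(np.argmax(np.abs(D.T @ r))))      # greedy atom pick
        a, *_ = np.linalg.lstsq(D[:, support], y, rcond=None)
        r = y - D[:, support] @ a
        rss = max(r @ r, 1e-12)
        bits = (n / 2) * np.log2(rss / n) \
             + k * np.log2(p) \
             + (k / 2) * np.log2(n)          # ~ parameter-precision cost
        if bits < best[0]:
            best = (bits, list(support))
    return best[1]

rng = np.random.default_rng(0)
D = rng.normal(size=(64, 128)); D /= np.linalg.norm(D, axis=0)
a0 = np.zeros(128); a0[[3, 40, 77]] = [1.0, -1.2, 0.8]
y = D @ a0 + 0.05 * rng.normal(size=64)
print(sorted(mdl_sparse_code(y, D)))   # ideally recovers the support {3, 40, 77}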

@article{fds265127,
   Author = {Zhou, M and Yang, H and Sapiro, G and Dunson, D and Carin,
             L},
   Title = {Covariate-dependent dictionary learning and sparse
             coding},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {5824-5827},
   Publisher = {IEEE},
   Year = {2011},
   Month = {August},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2011.5947685},
   Abstract = {A dependent hierarchical beta process (dHBP) is developed as
             a prior for data that may be represented in terms of a
             sparse set of latent features (dictionary elements), with
             covariate-dependent feature usage. The dHBP is applicable to
             general covariates and data models, imposing that signals
             with similar covariates are likely to be manifested in terms
             of similar features. As an application, we consider the
             simultaneous sparse modeling of multiple images, with the
             covariate of a given image linked to its similarity to all
             other images (as applied in manifold learning). Efficient
             inference is performed using hybrid Gibbs,
             Metropolis-Hastings and slice sampling. © 2011
             IEEE.},
   Doi = {10.1109/ICASSP.2011.5947685},
   Key = {fds265127}
}

@article{fds265095,
   Author = {Aganj, I and Lenglet, C and Jahanshad, N and Yacoub, E and Harel, N and Thompson, PM and Sapiro, G},
   Title = {A Hough transform global probabilistic approach to
             multiple-subject diffusion MRI tractography.},
   Journal = {Medical image analysis},
   Volume = {15},
   Number = {4},
   Pages = {414-425},
   Year = {2011},
   Month = {August},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/21376655},
   Abstract = {A global probabilistic fiber tracking approach based on the
             voting process provided by the Hough transform is introduced
             in this work. The proposed framework tests candidate 3D
             curves in the volume, assigning to each one a score computed
             from the diffusion images, and then selects the curves with
             the highest scores as the potential anatomical connections.
             The algorithm avoids local minima by performing an
             exhaustive search at the desired resolution. The technique
             is easily extended to multiple subjects, considering a
             single representative volume where the registered
             high-angular resolution diffusion images (HARDI) from all
             the subjects are non-linearly combined, thereby obtaining
             population-representative tracts. The tractography algorithm
             is run only once for the multiple subjects, and no tract
             alignment is necessary. We present experimental results on
             HARDI volumes, ranging from simulated and 1.5T physical
             phantoms to 7T and 4T human brain and 7T monkey brain
             datasets.},
   Doi = {10.1016/j.media.2011.01.003},
   Key = {fds265095}
}
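
(Illustrative sketch.) The Hough-style voting amounts to scoring every candidate curve by accumulating evidence from the diffusion data along it and keeping the top scorers. Below, a synthetic spatially constant ODF field and a small family of straight-line candidates stand in for the paper's ODF volumes and polynomial curve parameterization.

import numpy as np

def odf_value(point, direction):
    """Synthetic ODF field: fibers run along +/-x everywhere, so the value is
    higher the more the direction aligns with the x-axis (point is unused
    because this toy field is spatially constant)."""
    axis = np.array([1.0, 0.0, 0.0])
    return np.exp(4.0 * (direction @ axis) ** 2)

def curve_score(points):
    """Hough-style score: sum of log-ODF values along the curve's tangents."""
    tangents = np.diff(points, axis=0)
    tangents /= np.linalg.norm(tangents, axis=1, keepdims=True)
    return sum(np.log(odf_value(p, t)) for p, t in zip(points[:-1], tangents))

# Exhaustive search over candidate straight lines from a seed point.
seed = np.zeros(3)
thetas = np.linspace(0, np.pi, 19)
candidates = [seed + np.outer(np.linspace(0, 10, 40),
                              [np.cos(th), np.sin(th), 0.0]) for th in thetas]
scores = [curve_score(c) for c in candidates]
print(thetas[int(np.argmax(scores))])  # the winning curve runs along the x-axis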

@article{fds265092,
   Author = {Arias, P and Facciolo, G and Caselles, V and Sapiro,
             G},
   Title = {A variational framework for exemplar-based image
             inpainting},
   Journal = {International Journal of Computer Vision},
   Volume = {93},
   Number = {3},
   Pages = {319-347},
   Publisher = {Springer Nature},
   Year = {2011},
   Month = {July},
   ISSN = {0920-5691},
   url = {http://dx.doi.org/10.1007/s11263-010-0418-7},
   Abstract = {Non-local methods for image denoising and inpainting have
             gained considerable attention in recent years. This is in
             part due to their superior performance in textured images, a
             known weakness of purely local methods. Local methods on the
             other hand have demonstrated to be very appropriate for the
             recovering of geometric structures such as image edges. The
             synthesis of both types of methods is a trend in current
             research. Variational analysis in particular is an
             appropriate tool for a unified treatment of local and
             nonlocal methods. In this work we propose a general
             variational framework for non-local image inpainting, from
             which important and representative previous inpainting
             schemes can be derived, in addition to leading to novel
             ones. We explicitly study some of these, relating them to
             previous work and showing results on synthetic and real
             images. © 2010 Springer Science+Business Media,
             LLC.},
   Doi = {10.1007/s11263-010-0418-7},
   Key = {fds265092}
}
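
(Illustrative sketch.) A bare-bones exemplar-based fill conveys the non-local idea: each missing pixel is completed from the best-matching known patch elsewhere in the image. The greedy single-scale loop below ignores the variational coupling between patch weights and image values that the paper formalizes; sizes and test image are ours.

import numpy as np

def inpaint(img, known, psz=3, n_pass=5):
    """Bare-bones exemplar-based fill: every unknown pixel with some known
    patch context receives the center value of the best-matching fully-known
    patch (SSD over the known pixels).  Repeating fills the hole inward."""
    img, known = img.astype(float), known.copy()
    h, w = img.shape
    r = psz // 2
    sources = [(i, j) for i in range(r, h - r) for j in range(r, w - r)
               if known[i - r:i + r + 1, j - r:j + r + 1].all()]
    for _ in range(n_pass):
        if known.all():
            break
        for i in range(r, h - r):
            for j in range(r, w - r):
                if known[i, j]:
                    continue
                m = known[i - r:i + r + 1, j - r:j + r + 1]
                if not m.any():
                    continue                 # no context yet; filled later
                t = img[i - r:i + r + 1, j - r:j + r + 1]
                si, sj = min(sources, key=lambda s: (
                    ((img[s[0] - r:s[0] + r + 1, s[1] - r:s[1] + r + 1] - t)[m]
                     ** 2).sum()))
                img[i, j], known[i, j] = img[si, sj], True
    return img

# Vertical stripes with a square hole; the fill should continue the texture.
img = np.tile(np.array([0.0, 1.0] * 8), (16, 1))
known = np.ones_like(img, bool)
known[6:10, 6:10] = False
out = inpaint(np.where(known, img, 0.0), known)
print(np.abs(out[6:10, 6:10] - img[6:10, 6:10]).max())  # ~0 if texture recovered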

@article{fds265093,
   Author = {Wirth, B and Bar, L and Rumpf, M and Sapiro, G},
   Title = {A continuum mechanical approach to geodesics in shape
             space},
   Journal = {International Journal of Computer Vision},
   Volume = {93},
   Number = {3},
   Pages = {293-318},
   Publisher = {Springer Nature},
   Year = {2011},
   Month = {July},
   ISSN = {0920-5691},
   url = {http://dx.doi.org/10.1007/s11263-010-0416-9},
   Abstract = {In this paper concepts from continuum mechanics are used to
             define geodesic paths in the space of shapes, where shapes
             are implicitly described as boundary contours of objects.
             The proposed shape metric is derived from a continuum
             mechanical notion of viscous dissipation. A geodesic path is
             defined as the family of shapes such that the total amount
             of viscous dissipation caused by an optimal material
             transport along the path is minimized. The approach can
             easily be generalized to shapes given as segment contours of
             multi-labeled images and to geodesic paths between partially
             occluded objects. The proposed computational framework for
             finding such a minimizer is based on the time discretization
             of a geodesic path as a sequence of pairwise matching
             problems, which is strictly invariant with respect to rigid
             body motions and ensures a 1-1 correspondence along the
             induced flow in shape space. When decreasing the time step
             size, the proposed model leads to the minimization of the
             actual geodesic length, where the Hessian of the pairwise
             matching energy reflects the chosen Riemannian metric on the
             underlying shape space. If the constraint of pairwise shape
             correspondence is replaced by the volume of the shape
             mismatch as a penalty functional, one obtains for decreasing
             time step size an optical flow term controlling the
             transport of the shape by the underlying motion field. The
             method is implemented via a level set representation of
             shapes, and a finite element approximation is employed as
             spatial discretization both for the pairwise matching
             deformations and for the level set representations. The
             numerical relaxation of the energy is performed via an
             efficient multi-scale procedure in space and time. Various
             examples for 2D and 3D shapes underline the effectiveness
             and robustness of the proposed approach. © 2010 Springer
             Science+Business Media, LLC.},
   Doi = {10.1007/s11263-010-0416-9},
   Key = {fds265093}
}
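
(Notation ours, not verbatim from the paper.) In symbols, the construction in the abstract reads roughly as follows: a discrete path of shapes minimizes a sum of pairwise matching energies, each measuring viscous dissipation, and refining the time step recovers the geodesic length of the underlying metric.

% Time-discrete geodesic between shapes S_0 = S_A and S_K = S_B:
% minimize the sum of pairwise matching (dissipation) energies
\[
  E(S_1,\dots,S_{K-1}) \;=\; K \sum_{k=1}^{K} W[S_{k-1}, S_k],
\]
% where each W is a viscous-dissipation matching energy; for a velocity
% field v transporting S_{k-1} onto S_k, the local dissipation rate has the
% standard Newtonian-fluid form
\[
  \int_{\Omega} \lambda\,\bigl(\operatorname{tr}\varepsilon[v]\bigr)^2
  + 2\mu\,\operatorname{tr}\!\bigl(\varepsilon[v]^2\bigr)\,dx,
  \qquad \varepsilon[v] = \tfrac{1}{2}\bigl(\nabla v + \nabla v^{\mathsf T}\bigr).
\]
% As K increases, the discrete energy approximates the squared geodesic
% length induced by the underlying Riemannian (viscous-flow) metric.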

@article{fds265091,
   Author = {Sapiro, G},
   Title = {Technical perspective: Images everywhere looking for
             models},
   Journal = {Communications of the ACM},
   Volume = {54},
   Number = {5},
   Pages = {108},
   Publisher = {Association for Computing Machinery (ACM)},
   Year = {2011},
   Month = {May},
   ISSN = {0001-0782},
   url = {http://dx.doi.org/10.1145/1941487.1941512},
   Abstract = {Deriving appropriate regularization terms, priors or models,
             has occupied the research community since the early days of
             digital image processing. Different image models can be
             appropriate for different types of images; for example, MRI
             and natural images should have different models. The basic
             underlying concept is that local image information repeats
             itself across the non-local image. Noise, on the other hand,
             is expected in numerous scenarios to be random. Therefore,
             collecting those similar local regions all across the image,
             the noise can be eliminated by simple estimators based on
             having multiple observations of the same underlying signal
             under different noise conditions. The self-similarity model
             assumes the dictionary is the image itself, or actually its
             local patches. All these models indicate that images, and in
             particular image patches, do not actually live in the
             ambient high-dimensional space, but in some much lower
              dimensional stratification embedded in it.},
   Doi = {10.1145/1941487.1941512},
   Key = {fds265091}
}

@article{fds265088,
   Author = {Prasad, G and Jahanshad, N and Aganj, I and Lenglet, C and Sapiro, G and Toga, AW and Thompson, PM},
   Title = {ATLAS-BASED FIBER CLUSTERING FOR MULTI-SUBJECT ANALYSIS OF
             HIGH ANGULAR RESOLUTION DIFFUSION IMAGING
             TRACTOGRAPHY.},
   Journal = {Proceedings. IEEE International Symposium on Biomedical
             Imaging},
   Volume = {2011},
   Pages = {276-280},
   Publisher = {IEEE},
   Year = {2011},
   Month = {April},
   ISSN = {1945-7928},
   url = {http://dx.doi.org/10.1109/isbi.2011.5872405},
   Abstract = {High angular resolution diffusion imaging (HARDI) allows
             <i>in vivo</i> analysis of the white matter structure and
             connectivity. Based on orientation distribution functions
             (ODFs) that represent the directionality of water diffusion
             at each point in the brain, tractography methods can recover
             major axonal pathways. This enables tract-based analysis of
             fiber integrity and connectivity. For multi-subject
             comparisons, fibers may be clustered into bundles that are
             consistently found across subjects. To do this, we scanned
             20 young adults with HARDI at 4 T. From the reconstructed
             ODFs, we performed whole-brain tractography with a novel
             Hough transform method. We then used measures of agreement
             between the extracted 3D curves and a co-registered
             probabilistic DTI atlas to select key pathways. Using median
             filtering and a shortest path graph search, we derived the
             maximum density path to compactly represent each tract in
             the population. With this tract-based method, we performed
             tract-based analysis of fractional anisotropy, and assessed
             how the chosen tractography algorithm influenced the
             results. The resulting method may expedite population-based
             statistical analysis of HARDI and DTI.},
   Doi = {10.1109/isbi.2011.5872405},
   Key = {fds265088}
}

@article{fds304071,
   Author = {Silva, J and Chen, M and Eldar, YC and Sapiro, G and Carin,
             L},
   Title = {Blind Compressed Sensing Over a Structured Union of
             Subspaces},
   Volume = {abs/1103.2469},
   Year = {2011},
   Month = {March},
   url = {http://arxiv.org/abs/1103.2469v1},
   Abstract = {This paper addresses the problem of simultaneous signal
             recovery and dictionary learning based on compressive
             measurements. Multiple signals are analyzed jointly, with
             multiple sensing matrices, under the assumption that the
             unknown signals come from a union of a small number of
             disjoint subspaces. This problem is important, for instance,
             in image inpainting applications, in which the multiple
             signals are constituted by (incomplete) image patches taken
             from the overall image. This work extends standard
             dictionary learning and block-sparse dictionary
             optimization, by considering compressive measurements, e.g.,
             incomplete data). Previous work on blind compressed sensing
             is also generalized by using multiple sensing matrices and
             relaxing some of the restrictions on the learned dictionary.
             Drawing on results developed in the context of matrix
             completion, it is proven that both the dictionary and
             signals can be recovered with high probability from
             compressed measurements. The solution is unique up to block
             permutations and invertible linear transformations of the
             dictionary atoms. The recovery is contingent on the number
             of measurements per signal and the number of signals being
             sufficiently large; bounds are derived for these quantities.
             In addition, this paper presents a computationally practical
             algorithm that performs dictionary learning and signal
             recovery, and establishes conditions for its convergence to
             a local optimum. Experimental results for image inpainting
             demonstrate the capabilities of the method.},
   Key = {fds304071}
}

@article{fds265131,
   Author = {Carin, L and Baraniuk, RG and Cevher, V and Dunson, D and Jordan, MI and Sapiro, G and Wakin, MB},
   Title = {Learning Low-Dimensional Signal Models: A Bayesian approach
             based on incomplete measurements.},
   Journal = {IEEE signal processing magazine},
   Volume = {28},
   Number = {2},
   Pages = {39-51},
   Year = {2011},
   Month = {March},
   ISSN = {1053-5888},
   url = {http://dx.doi.org/10.1109/msp.2010.939733},
   Abstract = {Sampling, coding, and streaming even the most essential
             data, e.g., in medical imaging and weather-monitoring
             applications, produce a data deluge that severely stresses
             the available analog-to-digital converter, communication
             bandwidth, and digital-storage resources. Surprisingly,
             while the ambient data dimension is large in many problems,
             the relevant information in the data can reside in a much
             lower dimensional space. © 2006 IEEE.},
   Doi = {10.1109/msp.2010.939733},
   Key = {fds265131}
}

@article{fds265089,
   Author = {Ma, Y and Niyogi, P and Sapiro, G and Vidal, R},
   Title = {Dimensionality reduction via subspace and submanifold
             learning},
   Journal = {IEEE Signal Processing Magazine},
   Volume = {28},
   Number = {2},
   Pages = {14-126},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2011},
   Month = {January},
   ISSN = {1053-5888},
   url = {http://dx.doi.org/10.1109/MSP.2010.940005},
   Abstract = {The problem of finding and exploiting low-dimensional
             structures in high-dimensional data is taking on increasing
             importance in image, video, or audio processing; Web data
             analysis/search; and bioinformatics, where data sets now
             routinely lie in observational spaces of thousands,
             millions, or even billions of dimensions. The curse of
             dimensionality is in full play here: We often need to
             conduct meaningful inference with a limited number of
             samples in a very high-dimensional space. Conventional
             statistical and computational tools have become severely
             inadequate for processing and analyzing such
              high-dimensional data.},
   Doi = {10.1109/MSP.2010.940005},
   Key = {fds265089}
}

@article{fds264774,
   Author = {Sapiro, G},
   Title = {Should we choose to explain and understand? The
             interpretation of human actions},
   Journal = {QUINZAINE LITTERAIRE},
   Number = {1043},
   Pages = {28-28},
   Year = {2011},
   ISSN = {0048-6493},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000294072900021&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264774}
}

@article{fds264781,
   Author = {Sapiro, G},
   Title = {Merchants of Culture. The Publishing Business in the
             Twenty-First Century},
   Journal = {ACTES DE LA RECHERCHE EN SCIENCES SOCIALES},
   Number = {186-87},
   Pages = {132-135},
   Year = {2011},
   ISSN = {0335-5322},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000289881700009&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264781}
}

@article{fds264817,
   Author = {Sapiro, G},
   Title = {WHAT IS A FRENCH PHILOSOPHER? The social life of concepts
             (1880-1980)},
   Journal = {QUINZAINE LITTERAIRE},
   Number = {1034},
   Pages = {21-21},
   Year = {2011},
   ISSN = {0048-6493},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000290073900016&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264817}
}

@article{fds264848,
   Author = {Sprechmann, P and Ramírez, I and Cancela, P and Sapiro,
             G},
   Title = {Collaborative sources identification in mixed signals via
             hierarchical sparse modeling.},
   Journal = {ICASSP},
   Pages = {5816-5819},
   Publisher = {IEEE},
   Year = {2011},
   ISBN = {978-1-4577-0539-7},
   url = {http://www.informatik.uni-trier.de/~ley/db/conf/icassp/icassp2010.html},
   Doi = {10.1109/ICASSP.2011.5947683},
   Key = {fds264848}
}

@article{fds264849,
   Author = {Léger, F and Yu, G and Sapiro, G},
   Title = {Efficient matrix completion with Gaussian
             models.},
   Journal = {ICASSP},
   Pages = {1113-1116},
   Publisher = {IEEE},
   Year = {2011},
   ISBN = {978-1-4577-0539-7},
   url = {http://www.informatik.uni-trier.de/~ley/db/conf/icassp/icassp2010.html},
   Doi = {10.1109/ICASSP.2011.5946603},
   Key = {fds264849}
}

@article{fds264858,
   Author = {Yu, G and Sapiro, G},
   Title = {Statistical Compressed Sensing of Gaussian Mixture
             Models.},
   Journal = {IEEE Trans. Signal Process.},
   Volume = {59},
   Pages = {5842-5858},
   Year = {2011},
   url = {http://dx.doi.org/10.1109/TSP.2011.2168521},
   Doi = {10.1109/TSP.2011.2168521},
   Key = {fds264858}
}

@article{fds264860,
   Author = {Yu, G and Sapiro, G},
   Title = {Statistical compressive sensing of Gaussian mixture
             models.},
   Journal = {ICASSP},
   Pages = {3728-3731},
   Publisher = {IEEE},
   Year = {2011},
   ISBN = {978-1-4577-0539-7},
   url = {http://www.informatik.uni-trier.de/~ley/db/conf/icassp/icassp2010.html},
   Doi = {10.1109/ICASSP.2011.5947161},
   Key = {fds264860}
}

@article{fds265073,
   Author = {Yu, G and Sapiro, G and Mallat, S},
   Title = {Image modeling and enhancement via structured sparse model
             selection},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Pages = {1641-1644},
   Publisher = {IEEE},
   Year = {2010},
   Month = {December},
   ISSN = {1522-4880},
   url = {http://dx.doi.org/10.1109/ICIP.2010.5653853},
   Abstract = {An image representation framework based on structured
              sparse model selection is introduced in this work. The
             corresponding modeling dictionary is comprised of a family
             of learned orthogonal bases. For an image patch, a model is
             first selected from this dictionary through linear
             approximation in a best basis, and the signal estimation is
             then calculated with the selected model. The model selection
             leads to a guaranteed near optimal denoising estimator. The
             degree of freedom in the model selection is equal to the
             number of the bases, typically about 10 for natural images,
             and is significantly lower than with traditional
             overcomplete dictionary approaches, stabilizing the
             representation. For an image patch of size √N × √N, the
              computational complexity of the proposed framework is O(N²),
             typically 2 to 3 orders of magnitude faster than estimation
             in an overcomplete dictionary. The orthogonal bases are
             adapted to the image of interest and are computed with a
             simple and fast procedure. State-of-the-art results are
             shown in image denoising, deblurring, and inpainting. ©
             2010 IEEE.},
   Doi = {10.1109/ICIP.2010.5653853},
   Key = {fds265073}
}
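
A hedged sketch of the model-selection idea in the entry above: among a small family of orthonormal bases, pick the one whose thresholded coefficients give the best penalized fit to the patch. The threshold and penalty below are illustrative choices, not the paper's exact estimator.

    import numpy as np

    def select_basis_and_estimate(y, bases, sigma):
        # bases: list of (N, N) orthonormal matrices; sigma: noise level.
        best = None
        for B in bases:
            c = B.T @ y                        # analysis coefficients in basis B
            t = 3.0 * sigma                    # illustrative threshold
            c_hat = c * (np.abs(c) > t)        # keep only significant coefficients
            est = B @ c_hat                    # estimate under this model
            # penalized fit: residual plus a penalty per retained coefficient
            cost = np.sum((y - est) ** 2) + (t ** 2) * np.count_nonzero(c_hat)
            if best is None or cost < best[0]:
                best = (cost, est)
        return best[1]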

@article{fds265125,
   Author = {Paisley, J and Zhou, M and Sapiro, G and Carin, L},
   Title = {Nonparametric image interpolation and dictionary learning
             using spatially-dependent dirichlet and beta process
             priors},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Pages = {1869-1872},
   Publisher = {IEEE},
   Year = {2010},
   Month = {December},
   ISSN = {1522-4880},
   url = {http://dx.doi.org/10.1109/ICIP.2010.5653350},
   Abstract = {We present a Bayesian model for image interpolation and
             dictionary learning that uses two nonparametric priors for
             sparse signal representations: the beta process and the
             Dirichlet process. Additionally, the model uses spatial
             information within the image to encourage sharing of
             information within image subregions. We derive a hybrid
             MAP/Gibbs sampler, which performs Gibbs sampling for the
             latent indicator variables and MAP estimation for all other
             parameters. We present experimental results, where we show
             an improvement over other state-of-the-art algorithms in the
             low-measurement regime. © 2010 IEEE.},
   Doi = {10.1109/ICIP.2010.5653350},
   Key = {fds265125}
}

@article{fds265126,
   Author = {Castrodad, A and Xing, Z and Greer, J and Bosch, E and Carin, L and Sapiro,
             G},
   Title = {Discriminative sparse representations in hyperspectral
             imagery},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Pages = {1313-1316},
   Publisher = {IEEE},
   Year = {2010},
   Month = {December},
   ISSN = {1522-4880},
   url = {http://dx.doi.org/10.1109/ICIP.2010.5651568},
   Abstract = {Recent advances in sparse modeling and dictionary learning
             for discriminative applications show high potential for
             numerous classification tasks. In this paper, we show that
             highly accurate material classification from hyperspectral
             imagery (HSI) can be obtained with these models, even when
             the data is reconstructed from a very small percentage of
             the original image samples. The proposed supervised HSI
             classification is performed using a measure that accounts
             for both reconstruction errors and sparsity levels for
             sparse representations based on class-dependent learned
             dictionaries. Combining the dictionaries learned for the
             different materials, a linear mixing model is derived for
             sub-pixel classification. Results with real hyperspectral
             data cubes are shown both for urban and non-urban terrain.
             © 2010 IEEE.},
   Doi = {10.1109/ICIP.2010.5651568},
   Key = {fds265126}
}
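
The class-assignment rule described above (reconstruction error plus sparsity under class-dependent dictionaries) can be sketched as follows; the exact measure in the paper may differ, and sklearn's Lasso is used here purely for convenience.

    import numpy as np
    from sklearn.linear_model import Lasso

    def class_score(x, D, alpha=0.01):
        # Sparse-code spectrum x in class dictionary D and return the
        # penalized fit 0.5*||x - D a||^2 + alpha*||a||_1 (illustrative).
        a = Lasso(alpha=alpha, fit_intercept=False, max_iter=5000).fit(D, x).coef_
        return 0.5 * np.sum((x - D @ a) ** 2) + alpha * np.abs(a).sum()

    def classify(x, dictionaries):
        # Assign x to the class whose learned dictionary explains it best.
        return int(np.argmin([class_score(x, D) for D in dictionaries]))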

@article{fds265079,
   Author = {Bugeau, A and Bertalmío, M and Caselles, V and Sapiro,
             G},
   Title = {A comprehensive framework for image inpainting.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {19},
   Number = {10},
   Pages = {2634-2645},
   Year = {2010},
   Month = {October},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/20435541},
   Abstract = {Inpainting is the art of modifying an image in a form that
             is not detectable by an ordinary observer. There are
             numerous and very different approaches to tackle the
             inpainting problem, though as explained in this paper, the
             most successful algorithms are based upon one or two of the
             following three basic techniques: copy-and-paste texture
             synthesis, geometric partial differential equations (PDEs),
             and coherence among neighboring pixels. We combine these
             three building blocks in a variational model, and provide a
             working algorithm for image inpainting trying to approximate
             the minimum of the proposed energy functional. Our
             experiments show that the combination of all three terms of
             the proposed energy works better than taking each term
             separately, and the results obtained are within the
             state-of-the-art.},
   Doi = {10.1109/tip.2010.2049240},
   Key = {fds265079}
}

@article{Bronstein2010,
   Author = {Bronstein, AM and Bronstein, MM and Kimmel, R and Mahmoudi, M and Sapiro, G},
   Title = {A gromov-hausdorff framework with diffusion geometry for
             topologically-robust non-rigid shape matching},
   Journal = {International Journal of Computer Vision},
   Volume = {89},
   Number = {2-3},
   Pages = {266-286},
   Publisher = {Springer Nature},
   Year = {2010},
   Month = {September},
   ISSN = {0920-5691},
   url = {http://dx.doi.org/10.1007/s11263-009-0301-6},
   Abstract = {In this paper, the problem of non-rigid shape recognition is
             studied from the perspective of metric geometry. In
             particular, we explore the applicability of diffusion
             distances within the Gromov-Hausdorff framework. While the
             traditionally used geodesic distance exploits the shortest
             path between points on the surface, the diffusion distance
             averages all paths connecting the points. The diffusion
             distance constitutes an intrinsic metric which is robust, in
             particular, to topological changes. Such changes in the form
             of shortcuts, holes, and missing data may be a result of
             natural non-rigid deformations as well as acquisition and
             representation noise due to inaccurate surface construction.
             The presentation of the proposed framework is complemented
             with examples demonstrating that in addition to the
             relatively low complexity involved in the computation of the
             diffusion distances between surface points, its recognition
             and matching performances favorably compare to the classical
             geodesic distances in the presence of topological changes
             between the non-rigid shapes. © 2009 Springer
             Science+Business Media, LLC.},
   Doi = {10.1007/s11263-009-0301-6},
   Key = {Bronstein2010}
}
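
For reference, the diffusion distance discussed above is commonly written in terms of the Laplace-Beltrami eigenpairs (\lambda_i, \phi_i) of the surface; in one standard form,

    d_t^2(x, y) = \sum_{i \ge 1} e^{-2 \lambda_i t} \bigl( \phi_i(x) - \phi_i(y) \bigr)^2 ,

where t is the diffusion time. Averaging over all connecting paths in this way is what makes the metric robust to topological shortcuts, unlike the geodesic distance.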

@article{fds265067,
   Author = {Ramirez, I and Sprechmann, P and Sapiro, G},
   Title = {Classification and clustering via dictionary learning with
             structured incoherence and shared features},
   Journal = {Proceedings of the IEEE Computer Society Conference on
             Computer Vision and Pattern Recognition},
   Pages = {3501-3508},
   Publisher = {IEEE},
   Year = {2010},
   Month = {August},
   ISSN = {1063-6919},
   url = {http://dx.doi.org/10.1109/CVPR.2010.5539964},
   Abstract = {A clustering framework within the sparse modeling and
             dictionary learning setting is introduced in this work.
              Instead of searching for the set of centroids that best fit
             the data, as in k-means type of approaches that model the
             data as distributions around discrete points, we optimize
             for a set of dictionaries, one for each cluster, for which
             the signals are best reconstructed in a sparse coding
             manner. Thereby, we are modeling the data as a union of
             learned low dimensional subspaces, and data points
             associated to subspaces spanned by just a few atoms of the
             same learned dictionary are clustered together. An
             incoherence promoting term encourages dictionaries
             associated to different classes to be as independent as
             possible, while still allowing for different classes to
             share features. This term directly acts on the dictionaries,
             thereby being applicable both in the supervised and
             unsupervised settings. Using learned dictionaries for
             classification and clustering makes this method robust and
             well suited to handle large datasets. The proposed framework
             uses a novel measurement for the quality of the sparse
             representation, inspired by the robustness of the ℓ1
             regularization term in sparse coding. In the case of
             unsupervised classification and/or clustering, a new
             initialization based on combining sparse coding with
             spectral clustering is proposed. This initialization
              clusters the dictionary atoms, and therefore reduces to a
              low-dimensional eigen-decomposition problem, making it
              applicable to large datasets. We first illustrate the
             proposed framework with examples on standard image and
             speech datasets in the supervised classification setting,
             obtaining results comparable to the state-of-the-art with
             this simple approach. We then present experiments for fully
             unsupervised clustering on extended standard datasets and
             texture images, obtaining excellent performance. ©2010
             IEEE.},
   Doi = {10.1109/CVPR.2010.5539964},
   Key = {fds265067}
}
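
Schematically (our notation, hedged), the energy described above couples per-cluster sparse coding with an incoherence penalty that acts directly on the dictionaries:

    \min_{\{D_c\},\, \{a_x\}} \; \sum_{c} \sum_{x \in C_c}
        \Bigl( \|x - D_c a_x\|_2^2 + \lambda \|a_x\|_1 \Bigr)
        \; + \; \eta \sum_{c \neq c'} \bigl\| D_c^{\top} D_{c'} \bigr\|_F^2 ,

with C_c the set of signals currently assigned to cluster c; each signal is (re)assigned to the dictionary giving it the best penalized fit.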

@article{fds265066,
   Author = {Caruyer, E and Aganj, I and Muetzel, RL and Lenglet, C and Sapiro, G and Deriche, R},
   Title = {Online orientation distribution function reconstruction in
             constant solid angle and its application to motion detection
             in HARDI},
   Journal = {2010 7th IEEE International Symposium on Biomedical Imaging:
             From Nano to Macro, ISBI 2010 - Proceedings},
   Pages = {812-815},
   Publisher = {IEEE},
   Year = {2010},
   Month = {August},
   url = {http://dx.doi.org/10.1109/ISBI.2010.5490052},
   Abstract = {The diffusion orientation distribution function (ODF) can be
             reconstructed from q-ball imaging (QBI) to map the complex
             intravoxel structure of water diffusion. As acquisition time
             is particularly large for high angular resolution diffusion
              imaging (HARDI), fast estimation algorithms have recently
              been proposed that provide online feedback on the
              reconstruction accuracy, so that the acquisition can be
              stopped or continued on demand. We adapt these real-time algorithms to the
             mathematically correct definition of ODF in constant solid
             angle (CSA), and develop a motion detection algorithm upon
             this reconstruction. Results of improved fiber crossing
             detection by CSA ODF are shown, and motion detection was
             implemented and tested in vivo. © 2010 IEEE.},
   Doi = {10.1109/ISBI.2010.5490052},
   Key = {fds265066}
}

@article{fds264808,
   Author = {Sapiro, G},
   Title = {Globalization and cultural diversity in the book market: The
             case of literary translations in the US and in
             France},
   Journal = {Poetics},
   Volume = {38},
   Number = {4},
   Pages = {419-439},
   Publisher = {Elsevier BV},
   Year = {2010},
   Month = {August},
   ISSN = {0304-422X},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000281391000004&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.1016/j.poetic.2010.05.001},
   Key = {fds264808}
}

@article{fds265078,
   Author = {Aganj, I and Lenglet, C and Sapiro, G and Yacoub, E and Ugurbil, K and Harel, N},
   Title = {Reconstruction of the orientation distribution function in
             single- and multiple-shell q-ball imaging within constant
             solid angle.},
   Journal = {Magnetic resonance in medicine},
   Volume = {64},
   Number = {2},
   Pages = {554-566},
   Year = {2010},
   Month = {August},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/20535807},
   Abstract = {q-Ball imaging is a high-angular-resolution diffusion
              imaging technique that has proven very successful in
             resolving multiple intravoxel fiber orientations in MR
             images. The standard computation of the orientation
             distribution function (the probability of diffusion in a
             given direction) from q-ball data uses linear radial
             projection, neglecting the change in the volume element
             along each direction. This results in spherical
             distributions that are different from the true orientation
             distribution functions. For instance, they are neither
             normalized nor as sharp as expected and generally require
             postprocessing, such as artificial sharpening. In this
             paper, a new technique is proposed that, by considering the
             solid angle factor, uses the mathematically correct
             definition of the orientation distribution function and
             results in a dimensionless and normalized orientation
             distribution function expression. Our model is flexible
             enough so that orientation distribution functions can be
             estimated either from single q-shell datasets or by
             exploiting the greater information available from multiple
             q-shell acquisitions. We show that the latter can be
             achieved by using a more accurate multiexponential model for
             the diffusion signal. The improved performance of the
             proposed method is demonstrated on artificial examples and
             high-angular-resolution diffusion imaging data acquired on a
             7-T magnet.},
   Doi = {10.1002/mrm.22365},
   Key = {fds265078}
}
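
As we recall it (consult the paper for the precise constants), the single-shell constant-solid-angle ODF takes the closed form

    \mathrm{ODF}(\hat{u}) \;=\; \frac{1}{4\pi} \;+\;
        \frac{1}{16\pi^2}\, \mathrm{FRT}\Bigl\{ \nabla_b^2
        \ln\bigl( -\ln E(\hat{q}) \bigr) \Bigr\} ,

where E is the normalized diffusion signal on the q-shell, \nabla_b^2 the Laplace-Beltrami operator, and FRT the Funk-Radon transform; the 1/(4\pi) term is what makes the resulting ODF properly normalized.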

@article{fds265064,
   Author = {Sprechmann, P and Ramirez, I and Sapiro, G and Eldar,
             Y},
   Title = {Collaborative hierarchical sparse modeling},
   Journal = {2010 44th Annual Conference on Information Sciences and
             Systems, CISS 2010},
   Publisher = {IEEE},
   Year = {2010},
   Month = {June},
   url = {http://dx.doi.org/10.1109/CISS.2010.5464845},
   Abstract = {Sparse modeling is a powerful framework for data analysis
             and processing. Traditionally, encoding in this framework is
             done by solving an ℓ1-regularized linear regression
             problem, usually called Lasso. In this work we first combine
             the sparsity-inducing property of the Lasso model, at the
             individual feature level, with the block-sparsity property
             of the group Lasso model, where sparse groups of features
             are jointly encoded, obtaining a sparsity pattern
             hierarchically structured. This results in the hierarchical
             Lasso, which shows important practical modeling advantages.
             We then extend this approach to the collaborative case,
             where a set of simultaneously coded signals share the same
             sparsity pattern at the higher (group) level but not
             necessarily at the lower one. Signals then share the same
             active groups, or classes, but not necessarily the same
             active set. This is very well suited for applications such
             as source separation. An efficient optimization procedure,
             which guarantees convergence to the global optimum, is
              developed for these new models. The presentation
             of the new framework and optimization approach is
             complemented with experimental examples and preliminary
             theoretical results. ©2010 IEEE.},
   Doi = {10.1109/CISS.2010.5464845},
   Key = {fds265064}
}
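
In our notation (a hedged schematic, with a[g] the coefficients of group g), the hierarchical Lasso encoder described above solves

    \min_{a} \; \tfrac{1}{2} \|x - D a\|_2^2
        \;+\; \lambda_2 \sum_{g \in \mathcal{G}} \|a[g]\|_2
        \;+\; \lambda_1 \|a\|_1 .

The group term selects a few active groups and the \ell_1 term sparsifies within them; the collaborative variant applies the group penalty jointly across the coefficient matrix of several signals, so they share active groups but not necessarily active sets.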

@article{fds265075,
   Author = {Lecumberry, F and Pardo, A and Sapiro, G},
   Title = {Simultaneous object classification and segmentation with
             high-order multiple shape models.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {19},
   Number = {3},
   Pages = {625-635},
   Year = {2010},
   Month = {March},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/20028636},
   Abstract = {Shape models (SMs), capturing the common features of a set
             of training shapes, represent a new incoming object based on
             its projection onto the corresponding model. Given a set of
             learned SMs representing different objects classes, and an
             image with a new shape, this work introduces a joint
             classification-segmentation framework with a twofold goal.
             First, to automatically select the SM that best represents
             the object, and second, to accurately segment the image
             taking into account both the image information and the
             features and variations learned from the online selected
             model. A new energy functional is introduced that
             simultaneously accomplishes both goals. Model selection is
             performed based on a shape similarity measure, online
             determining which model to use at each iteration of the
             steepest descent minimization, allowing for model switching
             and adaptation to the data. High-order SMs are used in order
             to deal with very similar object classes and natural
             variability within them. Position and transformation
             invariance is included as part of the modeling as well. The
             presentation of the framework is complemented with examples
             for the difficult task of simultaneously classifying and
             segmenting closely related shapes, such as stages of human
             activities, in images with severe occlusions.},
   Doi = {10.1109/tip.2009.2038759},
   Key = {fds265075}
}

@article{fds265063,
   Author = {Mairal, J and Bach, F and Ponce, J and Sapiro, G},
   Title = {Online learning for matrix factorization and sparse
             coding},
   Journal = {Journal of Machine Learning Research},
   Volume = {11},
   Pages = {19-60},
   Year = {2010},
   Month = {February},
   ISSN = {1532-4435},
   Abstract = {Sparse coding-that is, modelling data vectors as sparse
             linear combinations of basis elements-is widely used in
             machine learning, neuroscience, signal processing, and
             statistics. This paper focuses on the large-scale matrix
             factorization problem that consists of learning the basis
             set in order to adapt it to specific data. Variations of
             this problem include dictionary learning in signal
             processing, non-negative matrix factorization and sparse
             principal component analysis. In this paper, we propose to
             address these tasks with a new online optimization
             algorithm, based on stochastic approximations, which scales
             up gracefully to large data sets with millions of training
             samples, and extends naturally to various matrix
             factorization formulations, making it suitable for a wide
             range of learning problems. A proof of convergence is
             presented, along with experiments with natural images and
             genomic data demonstrating that it leads to state-of-the-art
             performance in terms of speed and optimization for both
             small and large data sets. © 2010 Julien Mairal, Francis
             Bach, Jean Ponce and Guillermo Sapiro.},
   Key = {fds265063}
}
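
A compact sketch of the online scheme described above, keeping the sufficient statistics A = Σ_t α_t α_tᵀ and B = Σ_t x_t α_tᵀ and updating atoms by block-coordinate descent; sklearn's Lasso stands in for the sparse-coding step, and all parameters are illustrative.

    import numpy as np
    from sklearn.linear_model import Lasso

    def online_dict_learning(X, k, lam=0.1):
        # X: one training signal per column. Alternates lasso sparse coding
        # with block-coordinate atom updates driven by the statistics A, B.
        n, T = X.shape
        rng = np.random.default_rng(0)
        D = rng.standard_normal((n, k))
        D /= np.linalg.norm(D, axis=0)            # unit-norm atoms
        A = np.zeros((k, k))                      # sum of alpha alpha^T
        B = np.zeros((n, k))                      # sum of x alpha^T
        for t in range(T):
            x = X[:, t]
            alpha = Lasso(alpha=lam, fit_intercept=False).fit(D, x).coef_
            A += np.outer(alpha, alpha)
            B += np.outer(x, alpha)
            for j in range(k):                    # block-coordinate atom update
                if A[j, j] < 1e-12:
                    continue
                u = D[:, j] + (B[:, j] - D @ A[:, j]) / A[j, j]
                D[:, j] = u / max(np.linalg.norm(u), 1.0)  # project to unit ball
        return D

sklearn's MiniBatchDictionaryLearning implements a production version of this idea.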

@article{fds265076,
   Author = {White, T and Su, S and Schmidt, M and Kao, C-Y and Sapiro,
             G},
   Title = {The development of gyrification in childhood and
             adolescence.},
   Journal = {Brain and cognition},
   Volume = {72},
   Number = {1},
   Pages = {36-45},
   Year = {2010},
   Month = {February},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/19942335},
   Abstract = {Gyrification is the process by which the brain undergoes
             changes in surface morphology to create sulcal and gyral
             regions. The period of greatest development of brain
             gyrification is during the third trimester of pregnancy, a
             period of time in which the brain undergoes considerable
             growth. Little is known about changes in gyrification during
             childhood and adolescence, although considering the changes
             in gray matter volume and thickness during this time period,
             it is conceivable that alterations in the brain surface
             morphology could also occur during this period of
             development. The formation of gyri and sulci in the brain
             allows for compact wiring that promotes and enhances
             efficient neural processing. If cerebral function and form
             are linked through the organization of neural connectivity,
             then alterations in neural connectivity, i.e., synaptic
             pruning, may also alter the gyral and sulcal patterns of the
             brain. This paper reviews developmental theories of
             gyrification, computational techniques for measuring
             gyrification, and the potential interaction between
             gyrification and neuronal connectivity. We also present
             recent findings involving alterations in gyrification during
             childhood and adolescence.},
   Doi = {10.1016/j.bandc.2009.10.009},
   Key = {fds265076}
}

@article{fds264819,
   Author = {Shema-Didi, L and Sela, S and Geron, R and Sapiro, G and Ore, L and Kristal, B},
   Title = {The Beneficial Effects of One Year Pomegranate Juice
             Consumption on Traditional and Nontraditional Risk Factors
             for Cardiovascular Diseases},
   Journal = {Free Radical Biology and Medicine},
   Volume = {49},
   Pages = {S198-S198},
   Publisher = {Elsevier BV},
   Year = {2010},
   Month = {January},
   ISSN = {0891-5849},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000284348000581&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.1016/j.freeradbiomed.2010.10.572},
   Key = {fds264819}
}

@article{fds264863,
   Author = {Sapiro, G},
   Title = {Special Section on Optimization in Imaging
             Sciences},
   Journal = {SIAM Journal on Imaging Sciences},
   Volume = {3},
   Number = {4},
   Pages = {1047-1047},
   Publisher = {Society for Industrial & Applied Mathematics
             (SIAM)},
   Year = {2010},
   Month = {January},
   url = {http://dx.doi.org/10.1137/sjisbi000003000004001047000001},
   Doi = {10.1137/sjisbi000003000004001047000001},
   Key = {fds264863}
}

@article{fds264869,
   Author = {Aganj, I and Lenglet, C and Sapiro, G},
   Title = {ODF maxima extraction in spherical harmonic representation
             via analytical search space reduction.},
   Journal = {Medical image computing and computer-assisted intervention :
             MICCAI ... International Conference on Medical Image
             Computing and Computer-Assisted Intervention},
   Volume = {13},
   Number = {Pt 2},
   Pages = {84-91},
   Year = {2010},
   Month = {January},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/20879302},
   Abstract = {By revealing complex fiber structure through the orientation
             distribution function (ODF), q-ball imaging has recently
             become a popular reconstruction technique in
             diffusion-weighted MRI. In this paper, we propose an
             analytical dimension reduction approach to ODF maxima
             extraction. We show that by expressing the ODF, or any
             antipodally symmetric spherical function, in the common
             fourth order real and symmetric spherical harmonic basis,
             the maxima of the two-dimensional ODF lie on an analytically
             derived one-dimensional space, from which we can detect the
             ODF maxima. This method reduces the computational complexity
             of the maxima detection, without compromising the accuracy.
             We demonstrate the performance of our technique on both
             artificial and human brain data.},
   Doi = {10.1007/978-3-642-15745-5_11},
   Key = {fds264869}
}

@article{fds265068,
   Author = {Sprechmann, P and Sapiro, G},
   Title = {Dictionary learning and sparse coding for unsupervised
             clustering},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {2042-2045},
   Publisher = {IEEE},
   Year = {2010},
   Month = {January},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2010.5494985},
   Abstract = {A clustering framework within the sparse modeling and
             dictionary learning setting is introduced in this work.
              Instead of searching for the set of centroids that best fit
             the data, as in k-means type of approaches that model the
             data as distributions around discrete points, we optimize
             for a set of dictionaries, one for each cluster, for which
             the signals are best reconstructed in a sparse coding
              manner. Thereby, we are modeling the data as a union of
             learned low dimensional subspaces, and data points
             associated to subspaces spanned by just a few atoms of the
             same learned dictionary are clustered together. Using
             learned dictionaries makes this method robust and well
             suited to handle large datasets. The proposed clustering
             algorithm uses a novel measurement for the quality of the
             sparse representation, inspired by the robustness of the
             ℓ1 regularization term in sparse coding. We first
             illustrate this measurement with examples on standard image
             and speech datasets in the supervised classification
             setting, showing with a simple approach its discriminative
             power and obtaining results comparable to the
             state-of-the-art. We then conclude with experiments for
             fully unsupervised clustering on extended standard datasets
             and texture images, obtaining excellent performance. ©2010
             IEEE.},
   Doi = {10.1109/ICASSP.2010.5494985},
   Key = {fds265068}
}

@article{fds265069,
   Author = {Bar, L and Sapiro, G},
   Title = {Hierarchical dictionary learning for invariant
             classification},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {3578-3581},
   Publisher = {IEEE},
   Year = {2010},
   Month = {January},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2010.5495916},
   Abstract = {Sparse representation theory has been increasingly used in
             the fields of signal processing and machine learning. The
             standard sparse models are not invariant to spatial
             transformations such as image rotations, and the
             representation is very sensitive even under small such
             distortions. Most studies addressing this problem proposed
             algorithms which either use transformed data as part of the
             training set, or are invariant or robust only under minor
             transformations. In this paper we suggest a framework which
             extracts sparse features invariant under significant
             rotations and scalings. The algorithm is based on a
             hierarchical architecture of dictionary learning for sparse
             coding in a cortical (log-polar) space. The proposed model
             is tested in supervised classification applications and
             proved to be robust under transformed data. ©2010
             IEEE.},
   Doi = {10.1109/ICASSP.2010.5495916},
   Key = {fds265069}
}
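
The cortical (log-polar) space mentioned above has a property worth illustrating: rotations and scalings about the center become (approximate) translations after log-polar resampling, which is what lets a translation-tolerant sparse code absorb them. A minimal numpy/scipy sketch (ours, not the paper's code):

    import numpy as np
    from scipy.ndimage import map_coordinates

    def to_log_polar(img, n_r=64, n_theta=64):
        # Resample an image onto a log-polar grid centered at the image center.
        h, w = img.shape
        cy, cx = (h - 1) / 2.0, (w - 1) / 2.0
        r_max = np.hypot(cy, cx)
        log_r = np.linspace(0.0, np.log(r_max), n_r)
        theta = np.linspace(0.0, 2 * np.pi, n_theta, endpoint=False)
        rr, tt = np.meshgrid(np.exp(log_r), theta, indexing="ij")
        ys, xs = cy + rr * np.sin(tt), cx + rr * np.cos(tt)
        return map_coordinates(img, [ys, xs], order=1, mode="nearest")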

@article{fds265071,
   Author = {Bai, X and Wang, J and Sapiro, G},
   Title = {Dynamic color flow: A motion-adaptive color model for object
             segmentation in video},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {6315 LNCS},
   Number = {PART 5},
   Pages = {617-630},
   Publisher = {Springer Berlin Heidelberg},
   Year = {2010},
   Month = {January},
   ISSN = {0302-9743},
   url = {http://dx.doi.org/10.1007/978-3-642-15555-0_45},
   Abstract = {Accurately modeling object colors, and features in general,
             plays a critical role in video segmentation and analysis.
             Commonly used color models, such as global Gaussian
             mixtures, localized Gaussian mixtures, and pixel-wise
             adaptive ones, often fail to accurately represent the object
             appearance in complicated scenes, thereby leading to
             segmentation errors. We introduce a new color model, Dynamic
             Color Flow, which unlike previous approaches, incorporates
             motion estimation into color modeling in a probabilistic
             framework, and adaptively changes model parameters to match
             the local properties of the motion. The proposed model
             accurately and reliably describes changes in the scene's
             appearance caused by motion across frames. We show how to
             apply this color model to both foreground and background
             layers in a balanced way for efficient object segmentation
             in video. Experimental results show that when compared with
             previous approaches, our model provides more accurate
             foreground and background estimations, leading to more
             efficient video object cutout systems. © 2010
             Springer-Verlag.},
   Doi = {10.1007/978-3-642-15555-0_45},
   Key = {fds265071}
}

@article{fds265074,
   Author = {Fiori, M and Musé, P and Aguirre, S and Sapiro, G},
   Title = {Automatic colon polyp flagging via geometric and texture
             features.},
   Journal = {Annual International Conference of the IEEE Engineering in
             Medicine and Biology Society. IEEE Engineering in Medicine
             and Biology Society. Annual International
             Conference},
   Volume = {2010},
   Pages = {3170-3173},
   Year = {2010},
   Month = {January},
   ISSN = {1557-170X},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/21096596},
   Abstract = {Computed Tomographic Colonography, combined with
             computer-aided detection (CAD), is a promising emerging
             technique for colonic polyp analysis. We present a CAD
             scheme for polyp flagging based on new texture and geometric
             features that consider both the information in the candidate
             polyp location and its immediate surrounding area, testing
             multiple sizes. The proposed algorithm is tested with ground
             truth data, including flat and small polyps, with very
             promising results.},
   Doi = {10.1109/iembs.2010.5627185},
   Key = {fds265074}
}

@article{fds265077,
   Author = {Wright, J and Ma, Y and Mairal, J and Sapiro, G and Huang, TS and Yan,
             S},
   Title = {Sparse representation for computer vision and pattern
             recognition},
   Journal = {Proceedings of the IEEE},
   Volume = {98},
   Number = {6},
   Pages = {1031-1044},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2010},
   Month = {January},
   ISSN = {0018-9219},
   url = {http://dx.doi.org/10.1109/JPROC.2010.2044470},
   Abstract = {Techniques from sparse signal representation are beginning
             to see significant impact in computer vision, often on
             nontraditional applications where the goal is not just to
             obtain a compact high-fidelity representation of the
             observed signal, but also to extract semantic information.
             The choice of dictionary plays a key role in bridging this
             gap: unconventional dictionaries consisting of, or learned
             from, the training samples themselves provide the key to
             obtaining state-of-the-art results and to attaching semantic
             meaning to sparse signal representations. Understanding the
             good performance of such unconventional dictionaries in turn
             demands new algorithmic and analytical techniques. This
             review paper highlights a few representative examples of how
             the interaction between sparse signal representation and
             computer vision can enrich both fields, and raises a number
             of open questions for further study. © 2010
             IEEE.},
   Doi = {10.1109/JPROC.2010.2044470},
   Key = {fds265077}
}

@article{fds264788,
   Author = {Sapiro, G and Steinmetz, G and Ducournau, C},
   Title = {The production of colonial and post-colonial
             representations},
   Journal = {ACTES DE LA RECHERCHE EN SCIENCES SOCIALES},
   Number = {185},
   Pages = {4-11},
   Year = {2010},
   ISSN = {0335-5322},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000287773600001&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264788}
}

@article{fds264790,
   Author = {Sapiro, G},
   Title = {Punish the violence of words: the French intellectual
             process at the end of World War II},
   Journal = {ESPRIT CREATEUR},
   Volume = {50},
   Number = {4},
   Pages = {4-19},
   Year = {2010},
   ISSN = {0014-0767},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000285448900002&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264790}
}

@article{fds264795,
   Author = {Sapiro, G and Kaniuk, Y},
   Title = {THE LAST JEW},
   Journal = {QUINZAINE LITTERAIRE},
   Number = {1008},
   Pages = {12-13},
   Year = {2010},
   ISSN = {0048-6493},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000274277400010&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264795}
}

@article{fds264812,
   Author = {Steinmetz, G and Sapiro, G and Balandier, G},
   Title = {All scientific careers have autobiographical
             moments},
   Journal = {ACTES DE LA RECHERCHE EN SCIENCES SOCIALES},
   Number = {185},
   Pages = {44-61},
   Year = {2010},
   ISSN = {0335-5322},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000287773600004&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264812}
}

@article{fds264846,
   Author = {Sprechmann, P and Ramírez, I and Sapiro, G and Eldar,
             YC},
   Title = {Collaborative hierarchical sparse modeling.},
   Journal = {CISS},
   Pages = {1-6},
   Publisher = {IEEE},
   Year = {2010},
   ISBN = {978-1-4244-7416-5},
   url = {http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=5456462},
   Doi = {10.1109/CISS.2010.5464845},
   Key = {fds264846}
}

@article{fds264857,
   Author = {Mairal, J and Bach, FR and Ponce, J and Sapiro, G},
   Title = {Online Learning for Matrix Factorization and Sparse
             Coding.},
   Journal = {J. Mach. Learn. Res.},
   Volume = {11},
   Pages = {19-60},
   Year = {2010},
   url = {http://dx.doi.org/10.1145/1756006.1756008},
   Doi = {10.1145/1756006.1756008},
   Key = {fds264857}
}

@article{fds264861,
   Author = {Ramírez, I and Sapiro, G},
   Title = {Sparse coding and dictionary learning based on the MDL
             principle},
   Journal = {CoRR},
   Volume = {abs/1010.4751},
   Year = {2010},
   Key = {fds264861}
}

@article{fds264862,
   Author = {Ramírez, I and Sapiro, G},
   Title = {Universal Sparse Modeling},
   Journal = {CoRR},
   Volume = {abs/1003.2941},
   Year = {2010},
   Key = {fds264862}
}

@article{fds265065,
   Author = {Passalacqua, P and Trung, TD and Foufoula-Georgiou, E and Sapiro, G and Dietrich, WE},
   Title = {A geometric framework for channel network extraction from
             lidar: Nonlinear diffusion and geodesic paths},
   Journal = {Journal of Geophysical Research: Earth Surface},
   Volume = {115},
   Number = {1},
   Publisher = {American Geophysical Union (AGU)},
   Year = {2010},
   ISSN = {2169-9011},
   url = {http://dx.doi.org/10.1029/2009jf001254},
   Abstract = {A geometric framework for the automatic extraction of
             channels and channel networks from high-resolution digital
             elevation data is introduced in this paper. The proposed
             approach incorporates nonlinear diffusion for the
             preprocessing of the data, both to remove noise and to
             enhance features that are critical to the network
             extraction. Following this preprocessing, channels are
              defined as curves of minimal effort, or geodesics, where the
              effort is measured on the basis of fundamental
              geomorphological characteristics such as flow accumulation
              area and isoheight contour curvature. The merits of the
             proposed methodology, and especially the computational
             efficiency and accurate localization of the extracted
             channels, are demonstrated using light detection and ranging
             (lidar) data of the Skunk Creek, a tributary of the South
             Fork Eel River basin in northern California. Copyright 2010
             by the American Geophysical Union.},
   Doi = {10.1029/2009jf001254},
   Key = {fds265065}
}
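
To make the preprocessing step concrete, here is one classical nonlinear (edge-preserving) diffusion, in the Perona-Malik style, applied to an elevation array; the paper's exact diffusion differs, so treat this purely as an illustration with made-up parameters.

    import numpy as np

    def perona_malik(z, n_iter=50, kappa=0.1, dt=0.2):
        # Smooth the data while preserving feature-defining gradients.
        z = z.astype(float).copy()
        for _ in range(n_iter):
            gn = np.roll(z, -1, 0) - z   # forward differences to 4 neighbors
            gs = np.roll(z, 1, 0) - z
            ge = np.roll(z, -1, 1) - z
            gw = np.roll(z, 1, 1) - z
            c = lambda g: np.exp(-(g / kappa) ** 2)   # edge-stopping function
            z += dt * (c(gn) * gn + c(gs) * gs + c(ge) * ge + c(gw) * gw)
        return z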

@article{fds265047,
   Author = {Szlam, A and Sapiro, G},
   Title = {Discriminative k-metrics},
   Journal = {Proceedings of the 26th International Conference On Machine
             Learning, ICML 2009},
   Pages = {1009-1016},
   Year = {2009},
   Month = {December},
   Abstract = {The k q-flats algorithm is a generalization of the popular
              k-means algorithm where q-dimensional best-fit affine sets
             replace centroids as the cluster prototypes. In this work, a
             modification of the k q-flats framework for pattern
             classification is introduced. The basic idea is to replace
             the original reconstruction only energy, which is optimized
             to obtain the k affine spaces, by a new energy that
             incorporates discriminative terms. This way, the actual
             classification task is introduced as part of the design and
             optimization. The presentation of the proposed framework is
             complemented with experimental results, showing that the
             method is computationally very efficient and gives excellent
             results on standard supervised learning benchmarks.},
   Key = {fds265047}
}
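
For context, the underlying k q-flats iteration alternates assignment and PCA refitting; a minimal sketch (without the discriminative terms the paper adds):

    import numpy as np

    def k_qflats(X, k, q, n_iter=20, seed=0):
        # X: (n_points, dim). Alternate (i) assigning each point to its
        # nearest q-dimensional affine set and (ii) refitting each set by PCA.
        rng = np.random.default_rng(seed)
        labels = rng.integers(0, k, X.shape[0])
        for _ in range(n_iter):
            flats = []
            for c in range(k):
                P = X[labels == c]
                if len(P) <= q:                 # degenerate cluster: reseed
                    P = X[rng.choice(X.shape[0], q + 1, replace=False)]
                mu = P.mean(0)
                # top-q principal directions span the affine set
                _, _, Vt = np.linalg.svd(P - mu, full_matrices=False)
                flats.append((mu, Vt[:q]))
            # distance to each affine set = residual after projection onto it
            d = np.stack([np.linalg.norm((X - mu) - (X - mu) @ V.T @ V, axis=1)
                          for mu, V in flats], axis=1)
            labels = d.argmin(1)
        return labels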

@article{fds265052,
   Author = {Ramírez, I and Lecumberry, F and Sapiro, G},
   Title = {Universal priors for sparse modeling},
   Journal = {CAMSAP 2009 - 2009 3rd IEEE International Workshop on
             Computational Advances in Multi-Sensor Adaptive
             Processing},
   Pages = {197-200},
   Publisher = {IEEE},
   Year = {2009},
   Month = {December},
   url = {http://dx.doi.org/10.1109/CAMSAP.2009.5413302},
   Abstract = {Sparse data models, where data is assumed to be well
             represented as a linear combination of a few elements from a
             dictionary, have gained considerable attention in recent
             years, and their use has led to state-of-the-art results in
             many signal and image processing tasks. It is now well
             understood that the choice of the sparsity regularization
             term is critical in the success of such models. In this
             work, we use tools from information theory to propose a
             sparsity regularization term which has several theoretical
             and practical advantages over the more standard ℓ0 or ℓ1
             ones, and which leads to improved coding performance and
             accuracy in reconstruction tasks. We also briefly report on
             further improvements obtained by imposing low mutual
             coherence and Gram matrix norm on the learned dictionaries.
             © 2009 IEEE.},
   Doi = {10.1109/CAMSAP.2009.5413302},
   Key = {fds265052}
}

@article{fds265055,
   Author = {Mairal, J and Bach, F and Ponce, J and Sapiro, G and Zisserman,
             A},
   Title = {Non-local sparse models for image restoration},
   Journal = {Proceedings of the IEEE International Conference on Computer
             Vision},
   Pages = {2272-2279},
   Publisher = {IEEE},
   Year = {2009},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ICCV.2009.5459452},
   Abstract = {We propose in this paper to unify two different approaches
             to image restoration: On the one hand, learning a basis set
             (dictionary) adapted to sparse signal descriptions has
             proven to be very effective in image reconstruction and
             classification tasks. On the other hand, explicitly
             exploiting the self-similarities of natural images has led
             to the successful non-local means approach to image
             restoration. We propose simultaneous sparse coding as a
             framework for combining these two approaches in a natural
             manner. This is achieved by jointly decomposing groups of
             similar signals on subsets of the learned dictionary.
             Experimental results in image denoising and demosaicking
             tasks with synthetic and real noise show that the proposed
             method outperforms the state of the art, making it possible
             to effectively restore raw images from digital cameras at a
             reasonable speed and memory cost. ©2009
             IEEE.},
   Doi = {10.1109/ICCV.2009.5459452},
   Key = {fds265055}
}
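
One standard way to write the joint decomposition described above (hedged; our notation): stack a group of similar patches as the columns of X and penalize the rows of the coefficient matrix, so that all patches in the group select the same dictionary atoms:

    \min_{A} \; \tfrac{1}{2} \| X - D A \|_F^2
        \;+\; \lambda \sum_{j=1}^{k} \| A^{j} \|_2 ,

where A^{j} denotes the j-th row of A.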

@article{fds265049,
   Author = {Szlam, A and Sapiro, G},
   Title = {Discriminative k metrics and the Chan-Vese model for object
             detection and segmentation},
   Journal = {Proceedings of SPIE - The International Society for Optical
             Engineering},
   Volume = {7446},
   Publisher = {SPIE},
   Year = {2009},
   Month = {November},
   ISSN = {0277-786X},
   url = {http://dx.doi.org/10.1117/12.825800},
   Abstract = {In this work, a modification of the k q-flats framework for
             pattern classification introduced in [9] is used for
              pixelwise object detection. We include a preliminary
              discussion of augmenting this method with a
              Chan-Vese-like geometric regularization. © 2009
              SPIE.},
   Doi = {10.1117/12.825800},
   Key = {fds265049}
}

@article{fds265043,
   Author = {Arias, P and Caselles, V and Sapiro, G},
   Title = {A variational framework for non-local image
             inpainting},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {5681 LNCS},
   Pages = {345-358},
   Publisher = {Springer Berlin Heidelberg},
   Year = {2009},
   Month = {November},
   ISSN = {0302-9743},
   url = {http://dx.doi.org/10.1007/978-3-642-03641-5_26},
   Abstract = {Non-local methods for image denoising and inpainting have
             gained considerable attention in recent years. This is in
             part due to their superior performance in textured images, a
             known weakness of purely local methods. Local methods on the
             other hand have demonstrated to be very appropriate for the
             recovering of geometric structure such as image edges. The
             synthesis of both types of methods is a trend in current
             research. Variational analysis in particular is an
             appropriate tool for a unified treatment of local and
             non-local methods. In this work we propose a general
             variational framework for the problem of non-local image
             inpainting, from which several previous inpainting schemes
             can be derived, in addition to leading to novel ones. We
             explicitly study some of these, relating them to previous
             work and showing results on synthetic and real images. ©
             2009 Springer.},
   Doi = {10.1007/978-3-642-03641-5_26},
   Key = {fds265043}
}

@article{fds265044,
   Author = {Wirth, B and Bar, L and Rumpf, M and Sapiro, G},
   Title = {Geodesics in shape space via variational time
             discretization},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {5681 LNCS},
   Pages = {288-302},
   Publisher = {Springer Berlin Heidelberg},
   Year = {2009},
   Month = {November},
   ISSN = {0302-9743},
   url = {http://dx.doi.org/10.1007/978-3-642-03641-5_22},
   Abstract = {A variational approach to defining geodesics in the space of
             implicitly described shapes is introduced in this paper. The
             proposed framework is based on the time discretization of a
             geodesic path as a sequence of pairwise matching problems,
             which is strictly invariant with respect to rigid body
             motions and ensures a 1-1 property of the induced flow in
             shape space. For decreasing time step size, the proposed
             model leads to the minimization of the actual geodesic
             length, where the Hessian of the pairwise matching energy
             reflects the chosen Riemannian metric on the shape space.
             Considering shapes as boundary contours, the proposed shape
             metric is identical to a physical dissipation in a viscous
             fluid model of optimal transportation. If the pairwise shape
             correspondence is replaced by the volume of the shape
             mismatch as a penalty functional, for decreasing time step
             size one obtains an additional optical flow term controlling
             the transport of the shape by the underlying motion field.
             The implementation of the proposed approach is based on a
             level set representation of shapes, which allows topological
             transitions along the geodesic path. For the spatial
             discretization a finite element approximation is employed
             both for the pairwise deformations and for the level set
             representation. The numerical relaxation of the energy is
             performed via an efficient multi-scale procedure in space
             and time. Examples for 2D and 3D shapes underline the
             effectiveness and robustness of the proposed approach. ©
             2009 Springer.},
   Doi = {10.1007/978-3-642-03641-5_22},
   Key = {fds265044}
}

@article{fds265046,
   Author = {Facciolo, G and Arias, P and Caselles, V and Sapiro,
             G},
   Title = {Exemplar-based interpolation of sparsely sampled
             images},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {5681 LNCS},
   Pages = {331-344},
   Publisher = {Springer Berlin Heidelberg},
   Year = {2009},
   Month = {November},
   ISSN = {0302-9743},
   url = {http://dx.doi.org/10.1007/978-3-642-03641-5_25},
   Abstract = {A nonlocal variational formulation for interpolating a
             sparsely sampled image is introduced in this paper. The
             proposed variational formulation, originally motivated by
             image inpainting problems, encourages the transfer of
             information between similar image patches, following the
             paradigm of exemplar-based methods. Contrary to the
             classical inpainting problem, no complete patches are
             available from the sparse image samples, and the patch
             similarity criterion has to be redefined as here proposed.
             Initial experimental results with the proposed framework, at
             very low sampling densities, are very encouraging. We also
             explore some departures from the variational setting,
             showing a remarkable ability to recover textures at low
             sampling densities. © 2009 Springer.},
   Doi = {10.1007/978-3-642-03641-5_25},
   Key = {fds265046}
}

@article{fds265062,
   Author = {Aganj, I and Sapiro, G and Parikshak, N and Madsen, SK and Thompson,
             PM},
   Title = {Measurement of cortical thickness from MRI by minimum line
             integrals on soft-classified tissue.},
   Journal = {Human brain mapping},
   Volume = {30},
   Number = {10},
   Pages = {3188-3199},
   Year = {2009},
   Month = {October},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/19219850},
   Abstract = {Estimating the thickness of the cerebral cortex is a key
             step in many brain imaging studies, revealing valuable
             information on development or disease progression. In this
             work, we present a framework for measuring the cortical
             thickness, based on minimizing line integrals over the
             probability map of the gray matter in the MRI volume. We
             first prepare a probability map that contains the
             probability of each voxel belonging to the gray matter.
             Then, the thickness is basically defined for each voxel as
             the minimum line integral of the probability map on line
             segments centered at the point of interest. In contrast to
             our approach, previous methods often perform a binary-valued
             hard segmentation of the gray matter before measuring the
             cortical thickness. Because of image noise and partial
             volume effects, such a hard classification ignores the
             underlying tissue class probabilities assigned to each
             voxel, discarding potentially useful information. We
             describe our proposed method and demonstrate its performance
             on both artificial volumes and real 3D brain MRI data from
             subjects with Alzheimer's disease and healthy
             individuals.},
   Doi = {10.1002/hbm.20740},
   Key = {fds265062}
}
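
A hedged sketch of the minimum-line-integral definition above: integrate the gray-matter probability map along segments centered at a voxel and keep the smallest value over directions (random directions and illustrative parameters here; the paper's sampling is more careful).

    import numpy as np
    from scipy.ndimage import map_coordinates

    def thickness_at(p_gm, voxel, n_dirs=50, half_len=20, step=0.5, seed=0):
        # p_gm: 3D gray-matter probability volume; voxel: (z, y, x) center.
        rng = np.random.default_rng(seed)
        dirs = rng.standard_normal((n_dirs, 3))
        dirs /= np.linalg.norm(dirs, axis=1, keepdims=True)
        ts = np.arange(-half_len, half_len + step, step)  # samples along the line
        best = np.inf
        for d in dirs:
            pts = np.asarray(voxel, float)[:, None] + np.outer(d, ts)
            vals = map_coordinates(p_gm, pts, order=1, mode="constant")
            best = min(best, vals.sum() * step)           # Riemann-sum line integral
        return best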

@article{fds265041,
   Author = {Szlam, A and Sapiro, G},
   Title = {Discriminative k-metrics},
   Journal = {ACM International Conference Proceeding Series},
   Volume = {382},
   Publisher = {ACM Press},
   Year = {2009},
   Month = {September},
   url = {http://dx.doi.org/10.1145/1553374.1553503},
   Abstract = {The k q-flats algorithm is a generalization of the popular
              k-means algorithm where q-dimensional best-fit affine sets
             replace centroids as the cluster prototypes. In this work, a
             modification of the k q-flats framework for pattern
             classification is introduced. The basic idea is to replace
             the original reconstruction-only energy, which is optimized
             to obtain the k affine spaces, by a new energy that
             incorporates discriminative terms. This way, the actual
             classification task is introduced as part of the design and
             optimization. The presentation of the proposed framework is
             complemented with experimental results, showing that the
             method is computationally very efficient and gives excellent
             results on standard supervised learning benchmarks.
             Copyright 2009.},
   Doi = {10.1145/1553374.1553503},
   Key = {fds265041}
}
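
For reference, a minimal numpy sketch of the plain (reconstruction-only) k q-flats baseline that the paper modifies; the discriminative terms the abstract describes are not reproduced here, and the function name is illustrative.

    import numpy as np

    def k_qflats(X, k, q, n_iter=20, seed=0):
        """Cluster the rows of X around k affine sets of dimension q by
        alternating assignment and per-cluster PCA refit (the baseline
        energy; the paper adds discriminative terms on top)."""
        rng = np.random.default_rng(seed)
        labels = rng.integers(k, size=len(X))
        for _ in range(n_iter):
            means, bases = [], []
            for j in range(k):
                Xj = X[labels == j]
                if len(Xj) == 0:             # re-seed an empty cluster
                    Xj = X[rng.integers(len(X), size=1)]
                mu = Xj.mean(axis=0)
                # top-q principal directions span the best-fit affine set
                _, _, Vt = np.linalg.svd(Xj - mu, full_matrices=False)
                means.append(mu)
                bases.append(Vt[:q])
            # distance to each flat = residual norm after projection
            D = np.empty((len(X), k))
            for j in range(k):
                R = X - means[j]
                D[:, j] = np.linalg.norm(R - (R @ bases[j].T) @ bases[j], axis=1)
            labels = D.argmin(axis=1)
        return labels, means, bases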

@article{fds265042,
   Author = {Mairal, J and Bach, F and Ponce, J and Sapiro, G},
   Title = {Online dictionary learning for sparse coding},
   Journal = {ACM International Conference Proceeding Series},
   Volume = {382},
   Publisher = {ACM Press},
   Year = {2009},
   Month = {September},
   url = {http://dx.doi.org/10.1145/1553374.1553463},
   Abstract = {Sparse coding - that is, modelling data vectors as sparse
             linear combinations of basis elements - is widely used in
             machine learning, neuroscience, signal processing, and
             statistics. This paper focuses on learning the basis set,
             also called dictionary, to adapt it to specific data, an
             approach that has recently proven to be very effective for
             signal reconstruction and classification in the audio and
             image processing domains. This paper proposes a new online
             optimization algorithm for dictionary learning, based on
             stochastic approximations, which scales up gracefully to
             large datasets with millions of training samples. A proof of
             convergence is presented, along with experiments with
             natural images demonstrating that it leads to faster
             performance and better dictionaries than classical batch
             algorithms for both small and large datasets. Copyright
             2009.},
   Doi = {10.1145/1553374.1553463},
   Key = {fds265042}
}
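
The stochastic-approximation scheme is simple enough to sketch. The fragment below streams over samples, sparse-codes each one (plain ISTA standing in for the solver used in the paper), accumulates sufficient statistics, and updates the dictionary column by column. It is a didactic reduction under our own choices of solver and parameters, not a faithful reimplementation.

    import numpy as np

    def ista(D, x, lam, n_steps=50):
        """A few ISTA iterations for the lasso sparse-coding
        subproblem (a simple stand-in for the paper's solver)."""
        L = np.linalg.norm(D, 2) ** 2        # Lipschitz constant of the gradient
        a = np.zeros(D.shape[1])
        for _ in range(n_steps):
            g = a - D.T @ (D @ a - x) / L
            a = np.sign(g) * np.maximum(np.abs(g) - lam / L, 0.0)
        return a

    def online_dictionary_learning(X, n_atoms, lam=0.1, seed=0):
        """One pass over the rows of X: sparse-code each sample,
        accumulate sufficient statistics (A, B), then block-coordinate
        update of the dictionary columns with unit-ball projection."""
        rng = np.random.default_rng(seed)
        dim = X.shape[1]
        D = rng.standard_normal((dim, n_atoms))
        D /= np.linalg.norm(D, axis=0)
        A = np.zeros((n_atoms, n_atoms))
        B = np.zeros((dim, n_atoms))
        for x in X:
            a = ista(D, x, lam)
            A += np.outer(a, a)
            B += np.outer(x, a)
            for j in range(n_atoms):
                if A[j, j] < 1e-12:          # unused atom: skip update
                    continue
                u = (B[:, j] - D @ A[:, j]) / A[j, j] + D[:, j]
                D[:, j] = u / max(np.linalg.norm(u), 1.0)
        return D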

@article{fds264760,
   Author = {Sapiro, G},
   Title = {Un héritage symbolique détourné ? La nouvelle revue
             française des années noires},
   Journal = {Études littéraires},
   Volume = {40},
   Number = {1},
   Pages = {97-117},
   Publisher = {Consortium Erudit},
   Year = {2009},
   Month = {September},
   ISSN = {0014-214X},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000274064000006&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Abstract = {The reappearance of La nouvelle revue française under
             the German Occupation in France raises the question, at
             once theoretical and practical, of the continuity of
             institutions. Is Drieu La Rochelle's NRF still La NRF? Or
             is it a usurpation, as the departure of its former
             director, Jean Paulhan, and of the founding members
             suggests? This problem divided the team of former
             contributors from the outset. Their hesitations and their
             choices reveal the process by which strategies were
             readjusted to the new conditions of production,
             characterized by the literary field's loss of autonomy.
             The line adopted by the review also raises the question of
             whether pure art is possible under oppression. The decline
             of La NRF and its discredit would lastingly contribute to
             the delegitimization of the art-for-art's-sake position in
             the French literary field.},
   Doi = {10.7202/037901ar},
   Key = {fds264760}
}

@article{fds265061,
   Author = {Sundaramoorthi, G and Yezzi, A and Mennucci, AC and Sapiro,
             G},
   Title = {New possibilities with Sobolev active contours},
   Journal = {International Journal of Computer Vision},
   Volume = {84},
   Number = {2},
   Pages = {113-129},
   Publisher = {Springer Nature},
   Year = {2009},
   Month = {August},
   ISSN = {0920-5691},
   url = {http://dx.doi.org/10.1007/s11263-008-0133-9},
   Abstract = {Recently, the Sobolev metric was introduced to define
             gradient flows of various geometric active contour energies.
             It was shown that the Sobolev metric outperforms the
             traditional metric for the same energy in many cases such as
             for tracking where the coarse scale changes of the contour
             are important. Some interesting properties of Sobolev
             gradient flows include that they stabilize certain unstable
             traditional flows, and the order of the evolution PDEs is
             reduced when compared with traditional gradient flows of the
             same energies. In this paper, we explore new possibilities
             for active contours made possible by Sobolev metrics. The
             Sobolev method allows one to implement new energy-based
             active contour models that were not otherwise considered
             because the traditional minimizing method renders them
             ill-posed or numerically infeasible. In particular, we
             exploit the stabilizing and the order reducing properties of
             Sobolev gradients to implement the gradient descent of these
             new energies. We give examples of this class of energies,
             which include some simple geometric priors and new
             edge-based energies. We also show that these energies can be
             quite useful for segmentation and tracking. We also show
             that the gradient flows using the traditional metric are
             either ill-posed or numerically difficult to implement, and
             then show that the flows can be implemented in a stable and
             numerically feasible manner using the Sobolev gradient. ©
             2008 Springer Science+Business Media, LLC.},
   Doi = {10.1007/s11263-008-0133-9},
   Key = {fds265061}
}
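
The key operation, replacing the L2 gradient of a contour energy by its Sobolev (H1) counterpart, reduces to a smoothing solve along the curve. A minimal sketch, assuming a closed contour sampled uniformly in arclength (the function name and the scalar lam are illustrative):

    import numpy as np

    def sobolev_gradient(g, lam=1.0):
        """Convert an L2 contour gradient g (shape (n, 2), one 2D vector
        per contour sample) into an H1 gradient by solving
        (I - lam * d^2/ds^2) f = g with periodic boundary conditions,
        diagonalized by the FFT. The smoothing is what favors the
        coarse-scale contour motions discussed in the paper."""
        n = g.shape[0]
        freq = np.fft.fftfreq(n)             # cycles per sample
        mult = 1.0 / (1.0 + lam * (2.0 * np.pi * freq) ** 2)
        G = np.fft.fft(g, axis=0) * mult[:, None]
        return np.real(np.fft.ifft(G, axis=0))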

@article{fds265060,
   Author = {Bai, X and Wang, J and Simons, D and Sapiro, G},
   Title = {Video SnapCut: Robust video object cutout using localized
             classifiers},
   Journal = {ACM Transactions on Graphics},
   Volume = {28},
   Number = {3},
   Pages = {1-1},
   Publisher = {Association for Computing Machinery (ACM)},
   Year = {2009},
   Month = {July},
   ISSN = {0730-0301},
   url = {http://dx.doi.org/10.1145/1531326.1531376},
   Abstract = {Although tremendous success has been achieved for
             interactive object cutout in still images, accurately
             extracting dynamic objects in video remains a very
             challenging problem. Previous video cutout systems present
             two major limitations: (1) reliance on global statistics,
             thus lacking the ability to deal with complex and diverse
             scenes; and (2) treating segmentation as a global
             optimization, thus lacking a practical workflow that can
             guarantee the convergence of the systems to the desired
             results. We present Video SnapCut, a robust video object
             cutout system that significantly advances the
             state-of-the-art. In our system segmentation is achieved by
             the collaboration of a set of local classifiers, each
             adaptively integrating multiple local image features. We
             show how this segmentation paradigm naturally supports
             local user edits and propagates them across time. The object
             cutout system is completed with a novel coherent video
             matting technique. A comprehensive evaluation and comparison
             is presented, demonstrating the effectiveness of the
             proposed system at achieving high quality results, as well
             as the robustness of the system against various types of
             inputs. © 2009 ACM.},
   Doi = {10.1145/1531326.1531376},
   Key = {fds265060}
}

@article{fds265040,
   Author = {Duarte-Carvajalino, JM and Sapiro, G},
   Title = {Learning to sense sparse signals: simultaneous sensing
             matrix and sparsifying dictionary optimization.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {18},
   Number = {7},
   Pages = {1395-1408},
   Year = {2009},
   Month = {July},
   ISSN = {1057-7149},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/19497818},
   Abstract = {Sparse signal representation, analysis, and sensing have
             received a lot of attention in recent years from the signal
             processing, optimization, and learning communities. On one
             hand, learning overcomplete dictionaries that facilitate a
             sparse representation of the data as a linear combination
             of a few atoms from such a dictionary leads to state-of-the-art
             results in image and video restoration and classification.
             On the other hand, the framework of compressed sensing (CS)
             has shown that sparse signals can be recovered from far
             fewer samples than those required by the classical Shannon-Nyquist
             Theorem. The samples used in CS correspond to linear
             projections obtained by a sensing projection matrix. It has
             been shown that, for example, a nonadaptive random sampling
             matrix satisfies the fundamental theoretical requirements of
             CS, enjoying the additional benefit of universality. On the
             other hand, a projection sensing matrix that is optimally
             designed for a certain class of signals can further improve
             the reconstruction accuracy or further reduce the necessary
             number of samples. In this paper, we introduce a framework
             for the joint design and optimization, from a set of
             training images, of the nonparametric dictionary and the
             sensing matrix. We show that this joint optimization
             outperforms both the use of random sensing matrices and
             those matrices that are optimized independently of the
             learning of the dictionary. Particular cases of the proposed
             framework include the optimization of the sensing matrix for
             a given dictionary as well as the optimization of the
             dictionary for a predefined sensing environment. The
             presentation of the framework and its efficient numerical
             optimization is complemented with numerous examples on
             classical image datasets.},
   Doi = {10.1109/tip.2009.2022459},
   Key = {fds265040}
}
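
One ingredient, optimizing the sensing matrix for a fixed dictionary, can be pictured with a small gradient descent that pushes the Gram matrix of the effective dictionary Phi @ D toward the identity, i.e., toward low mutual coherence. This is a simplified stand-in for the paper's coupled optimization; the objective, step size, and function name are our illustrative choices.

    import numpy as np

    def optimize_sensing_matrix(D, m, n_iter=500, lr=1e-2, seed=0):
        """Gradient descent on ||(Phi D)^T (Phi D) - I||_F^2 so that the
        effective dictionary Phi @ D has near-orthonormal columns (a
        proxy for incoherence; simplified relative to the paper)."""
        rng = np.random.default_rng(seed)
        Phi = rng.standard_normal((m, D.shape[0])) / np.sqrt(m)
        I = np.eye(D.shape[1])
        for _ in range(n_iter):
            M = Phi @ D
            G = M.T @ M - I                  # Gram-matrix residual
            Phi -= lr * 4.0 * (M @ G) @ D.T  # gradient of the objective
        return Phi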

@article{fds265045,
   Author = {Jahanshad, N and Lee, AD and Lepore, N and Chou, Y-Y and Brun, C and Barysheva, M and Toga, AW and McMahon, KL and de Zubicaray, GI and Wright, MJ and Sapiro, G and Lenglet, C and Thompson,
             PM},
   Title = {REDUCING STRUCTURAL VARIATION TO DETERMINE THE GENETICS OF
             WHITE MATTER INTEGRITY ACROSS HEMISPHERES - A DTI STUDY OF
             100 TWINS.},
   Journal = {Proceedings. IEEE International Symposium on Biomedical
             Imaging},
   Volume = {2009},
   Pages = {819-822},
   Publisher = {IEEE},
   Year = {2009},
   Month = {June},
   url = {http://dx.doi.org/10.1109/isbi.2009.5193175},
   Abstract = {Studies of cerebral asymmetry can open doors to
             understanding the functional specialization of each brain
             hemisphere, and how this is altered in disease. Here we
             examined hemispheric asymmetries in fiber architecture using
             diffusion tensor imaging (DTI) in 100 subjects, using
             high-dimensional fluid warping to disentangle shape
             differences from measures sensitive to myelination.
             Confounding effects of purely structural asymmetries were
             reduced by using co-registered structural images to fluidly
             warp 3D maps of fiber characteristics (fractional and
             geodesic anisotropy) to a structurally symmetric minimal
             deformation template (MDT). We performed a quantitative
             genetic analysis on 100 subjects to determine whether the
             sources of the remaining signal asymmetries were primarily
             genetic or environmental. A twin design was used to identify
             the heritable features of fiber asymmetry in various regions
             of interest, to further assist in the discovery of genes
             influencing brain micro-architecture and brain
             lateralization. Genetic influences and left/right
             asymmetries were detected in the fiber architecture of the
             frontal lobes, with minor differences depending on the
             choice of registration template.},
   Doi = {10.1109/isbi.2009.5193175},
   Key = {fds265045}
}

@article{fds265048,
   Author = {Aganj, I and Lenglet, C and Sapiro, G},
   Title = {ODF RECONSTRUCTION IN Q-BALL IMAGING WITH SOLID ANGLE
             CONSIDERATION.},
   Journal = {Proceedings. IEEE International Symposium on Biomedical
             Imaging},
   Volume = {2009},
   Pages = {1398-1401},
   Publisher = {IEEE},
   Year = {2009},
   Month = {June},
   url = {http://dx.doi.org/10.1109/isbi.2009.5193327},
   Abstract = {Q-ball imaging (QBI) is a high angular resolution diffusion
             imaging (HARDI) technique which has been proven very
             successful in resolving multiple intravoxel fiber
             orientations in MR images. The standard computation of the
             orientation distribution function (ODF, the probability of
             diffusion in a given direction) from q-ball uses linear
             radial projection, neglecting the change in the volume
             element along the ray, thereby resulting in distributions
             different from the true ODFs. For instance, they are
             not normalized or as sharp as expected, and generally
             require post-processing, such as sharpening or spherical
             deconvolution. In this paper, we consider the mathematically
             correct definition of the ODF and derive a closed-form
             expression for it in QBI. The derived ODF is dimensionless
             and normalized, and can be efficiently computed from q-ball
             acquisition protocols. We describe our proposed method and
             demonstrate its significantly improved performance on
             artificial data and real HARDI volumes.},
   Doi = {10.1109/isbi.2009.5193327},
   Key = {fds265048}
}
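
In formulas, the contrast drawn in the abstract is between the linear radial projection and the solid-angle-correct definition of the ODF (P denotes the diffusion propagator; the notation below is the standard one, paraphrased rather than quoted from the paper):

    \[
      \mathrm{ODF}_{\mathrm{lin}}(\hat{u}) \;\propto\; \int_0^\infty P(r\hat{u})\,dr ,
      \qquad
      \mathrm{ODF}(\hat{u}) \;=\; \int_0^\infty P(r\hat{u})\,r^{2}\,dr .
    \]

The r^2 Jacobian of spherical coordinates is precisely the neglected volume element; with it, the ODF integrates to one over the sphere, which is why the derived expression is dimensionless and normalized.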

@article{fds265059,
   Author = {Bai, X and Sapiro, G},
   Title = {Geodesic matting: A framework for fast interactive image and
             video segmentation and matting},
   Journal = {International Journal of Computer Vision},
   Volume = {82},
   Number = {2},
   Pages = {113-132},
   Publisher = {Springer Nature},
   Year = {2009},
   Month = {April},
   ISSN = {0920-5691},
   url = {http://dx.doi.org/10.1007/s11263-008-0191-z},
   Abstract = {An interactive framework for soft segmentation and matting
             of natural images and videos is presented in this paper. The
             proposed technique is based on the optimal, linear time,
             computation of weighted geodesic distances to user-provided
             scribbles, from which the whole data is automatically
             segmented. The weights are based on spatial and/or temporal
             gradients, considering the statistics of the pixels
             scribbled by the user, without explicit optical flow or any
             advanced and often computationally expensive feature
             detectors. These could be naturally added to the proposed
             framework as well if desired, in the form of weights in the
             geodesic distances. An automatic localized refinement step
             follows this fast segmentation in order to further improve
             the results and accurately compute the corresponding matte
             function. Additional constraints in the distance
             definition permit efficient handling of occlusions such as
             people or objects crossing each other in a video sequence.
             The presentation of the framework is complemented with
             numerous and diverse examples, including extraction of
             moving foreground from dynamic background in video, natural
             and 3D medical images, and comparisons with the recent
             literature.},
   Doi = {10.1007/s11263-008-0191-z},
   Key = {fds265059}
}
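
The segmentation step can be pictured with a small Dijkstra sketch: each pixel gets a weighted geodesic distance to the foreground scribbles and to the background scribbles, and is assigned to the nearer set. The paper computes these distances in linear time with statistics-based weights; the grid Dijkstra and the generic per-pixel weight below are simplifications of ours.

    import heapq
    import numpy as np

    def geodesic_distance(weight, seeds):
        """Dijkstra on the 4-connected pixel grid: the cost of stepping
        between neighbours is the average of their per-pixel weights.
        `seeds` is an iterable of (row, col) scribble pixels."""
        h, w = weight.shape
        dist = np.full((h, w), np.inf)
        pq = []
        for (i, j) in seeds:
            dist[i, j] = 0.0
            heapq.heappush(pq, (0.0, i, j))
        while pq:
            d, i, j = heapq.heappop(pq)
            if d > dist[i, j]:
                continue                     # stale queue entry
            for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                ni, nj = i + di, j + dj
                if 0 <= ni < h and 0 <= nj < w:
                    nd = d + 0.5 * (weight[i, j] + weight[ni, nj])
                    if nd < dist[ni, nj]:
                        dist[ni, nj] = nd
                        heapq.heappush(pq, (nd, ni, nj))
        return dist

A pixel is then labeled foreground when its distance to the foreground scribbles is smaller than its distance to the background ones.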

@article{fds265056,
   Author = {Pollick, FE and Maoz, U and Handzel, AA and Giblin, PJ and Sapiro, G and Flash, T},
   Title = {Three-dimensional arm movements at constant equi-affine
             speed.},
   Journal = {Cortex; a journal devoted to the study of the nervous system
             and behavior},
   Volume = {45},
   Number = {3},
   Pages = {325-339},
   Year = {2009},
   Month = {March},
   ISSN = {0010-9452},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18678364},
   Abstract = {It has long been acknowledged that planar hand drawing
             movements conform to a relationship between movement speed
             and shape, such that movement speed is inversely
             proportional to the curvature to the power of one-third.
             Previous literature has detailed potential explanations for
             the power law's existence as well as systematic deviations
             from it. However, the case of speed-shape relations for
             three-dimensional (3D) drawing movements has remained
             largely unstudied. In this paper we first derive a
             generalization of the planar power law to 3D movements,
             which is based on the principle that this power law implies
             motion at constant equi-affine speed. This generalization
             results in a 3D power law where speed is inversely related
             to the one-third power of the curvature multiplied by the
             one-sixth power of the torsion. Next, we present data from
             human 3D scribbling movements, and compare the obtained
             speed-shape relation to that predicted by the 3D power law.
             Our results indicate that the introduction of the torsion
             term into the 3D power law accounts for significantly more
             of the variance in speed-shape relations of the movement
             data and that the obtained exponents are very close to the
             predicted values.},
   Doi = {10.1016/j.cortex.2008.03.010},
   Key = {fds265056}
}
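
In symbols, with v the movement speed, kappa the curvature, tau the torsion, and gamma a constant, the speed-shape relations described in the abstract read (standard notation, sign conventions aside, not copied from the paper):

    \[
      v \;=\; \gamma\,\kappa^{-1/3}
      \qquad\text{(planar one-third power law),}
    \]
    \[
      v \;=\; \gamma\,\kappa^{-1/3}\,\lvert\tau\rvert^{-1/6}
      \qquad\text{(3D generalization at constant equi-affine speed).}
    \]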

@article{fds265058,
   Author = {Lenglet, C and Campbell, JSW and Descoteaux, M and Haro, G and Savadjiev, P and Wassermann, D and Anwander, A and Deriche, R and Pike,
             GB and Sapiro, G and Siddiqi, K and Thompson, PM},
   Title = {Mathematical methods for diffusion MRI processing.},
   Journal = {NeuroImage},
   Volume = {45},
   Number = {1 Suppl},
   Pages = {S111-S122},
   Year = {2009},
   Month = {March},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/19063977},
   Abstract = {In this article, we review recent mathematical models and
             computational methods for the processing of diffusion
             Magnetic Resonance Images, including state-of-the-art
             reconstruction of diffusion models, cerebral white matter
             connectivity analysis, and segmentation techniques. We focus
             on Diffusion Tensor Images (DTI) and Q-Ball Images
             (QBI).},
   Doi = {10.1016/j.neuroimage.2008.10.054},
   Key = {fds265058}
}

@article{fds265057,
   Author = {Ivry, T and Michal, S and Avihoo, A and Sapiro, G and Barash,
             D},
   Title = {An image processing approach to computing distances between
             RNA secondary structures dot plots.},
   Journal = {Algorithms for molecular biology : AMB},
   Volume = {4},
   Pages = {4},
   Year = {2009},
   Month = {February},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/19203377},
   Abstract = {Background: Computing the distance between two RNA
             secondary structures can contribute in understanding the
             functional relationship between them. When used repeatedly,
             such a procedure may lead to finding a query RNA structure
             of interest in a database of structures. Several methods are
             available for computing distances between RNAs represented
             as strings or graphs, but none utilize the RNA
             representation with dot plots. Since dot plots are
             essentially digital images, there is a clear motivation to
             devise an algorithm for computing the distance between dot
             plots based on image processing methods. Results: We
             have developed a new metric dubbed 'DoPloCompare', which
             compares two RNA structures. The method is based on
             comparing dot plot diagrams that represent the secondary
             structures. When analyzing two diagrams and motivated by
             image processing, the distance is based on a combination of
             histogram correlations and a geometrical distance measure.
             We introduce, describe, and illustrate the procedure by two
             applications that utilize this metric on RNA sequences. The
             first application is the RNA design problem, where the goal
             is to find the nucleotide sequence for a given secondary
             structure. Examples where our proposed distance measure
             outperforms others are given. The second application locates
             peculiar point mutations that induce significant structural
             alterations relative to the wild-type predicted secondary
             structure. The approach reported in the past to solve this
             problem was tested on several RNA sequences with known
             secondary structures to affirm their prediction, as well as
             on a data set of ribosomal pieces. These pieces were
             computationally cut from a ribosome for which an
             experimentally derived secondary structure is available, and
             on each piece the prediction conveys similarity to the
             experimental result. Our newly proposed distance measure
             shows benefit in this problem as well when compared to
             standard methods used for assessing the distance similarity
             between two RNA secondary structures. Conclusion: Inspired
             by image processing and the dot plot representation for RNA
             secondary structure, we have managed to provide a
             conceptually new and potentially beneficial metric for
             comparing two RNA secondary structures. We illustrated our
             approach on the RNA design problem, as well as on an
             application that utilizes the distance measure to detect
             conformational rearranging point mutations in an RNA
             sequence.},
   Doi = {10.1186/1748-7188-4-4},
   Key = {fds265057}
}

@article{fds264696,
   Author = {Castrodad, A and Ramirez, I and Sapiro, G and Sprechmann, P and Yu,
             G},
   Title = {Second-generation sparse modeling: Structured and
             collaborative signal analysis},
   Pages = {65-87},
   Publisher = {Cambridge University Press},
   Year = {2009},
   Month = {January},
   url = {http://dx.doi.org/10.1017/CBO9780511794308.003},
   Abstract = {In this chapter the authors go beyond traditional sparse
             modeling, and address collaborative structured sparsity to
             add stability and prior information to the representation.
             In structured sparse modeling, instead of considering the
             dictionary atoms as singletons, the atoms are partitioned in
             groups, and a few groups are selected at a time for the
             signal encoding. A complementary way of adding structure,
             stability, and prior information to a model is via
             collaboration. Here, multiple signals, which are known to
             follow the same model, are allowed to collaborate in the
             coding. The first studied framework connects sparse modeling
             with Gaussian Mixture Models and leads to state-of-the-art
             image restoration. The second framework derives a
             hierarchical structure on top of the collaboration and is
             well fitted for source separation. Both models enjoy very
             important theoretical virtues as well. In traditional
             sparse modeling, it is assumed that a signal can
             be accurately represented by a sparse linear combination of
             atoms from a (learned) dictionary. A large class of signals,
             including most natural images and sounds, is well described
             by this model, as demonstrated by numerous state-of-the-art
             results in various signal processing applications. From a
             data modeling point of view, sparsity can be seen as a form
             of regularization, that is, as a device to restrict or
             control the set of coefficient values which are allowed in
             the model to produce an estimate of the data.},
   Doi = {10.1017/CBO9780511794308.003},
   Key = {fds264696}
}
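
The group-structured selection the chapter describes is captured by the proximal operator of the group-lasso penalty, which keeps or kills whole groups of coefficients jointly. A minimal sketch (the grouping and threshold are illustrative):

    import numpy as np

    def group_soft_threshold(a, groups, lam):
        """Proximal operator of lam * sum_g ||a_g||_2: each group of
        coefficients is shrunk or zeroed as a whole, so a few groups of
        atoms are selected at a time. `groups` is a list of index
        arrays partitioning the coefficient vector a."""
        out = np.zeros_like(a)
        for g in groups:
            ng = np.linalg.norm(a[g])
            if ng > lam:
                out[g] = (1.0 - lam / ng) * a[g]
        return out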

@article{fds264859,
   Author = {Rother, D and Sapiro, G},
   Title = {Seeing 3D Objects in a Single 2D Image},
   Journal = {Proceedings of the IEEE International Conference on Computer
             Vision},
   Pages = {1819-1826},
   Publisher = {IEEE},
   Year = {2009},
   Month = {January},
   ISBN = {9781424444205},
   url = {http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=5453389},
   Abstract = {A general framework simultaneously addressing pose
             estimation, 2D segmentation, object recognition, and 3D
             reconstruction from a single image is introduced in this
             paper. The proposed approach partitions 3D space into voxels
             and estimates the voxel states that maximize a likelihood
             integrating two components: the object fidelity, that is,
             the probability that an object occupies the given voxels,
             here encoded as a 3D shape prior learned from 3D samples of
             objects in a class; and the image fidelity, meaning the
             probability that the given voxels would produce the input
             image when properly projected to the image plane. We derive
             a loop-less graphical model for this likelihood and propose
             a computationally efficient optimization algorithm that is
             guaranteed to produce the global likelihood maximum.
             Furthermore, we derive a multi-resolution implementation of
             this algorithm that permits trading reconstruction and
             estimation accuracy for computation. The presentation of the
             proposed framework is complemented with experiments on real
             data demonstrating the accuracy of the proposed
             approach.},
   Doi = {10.1109/ICCV.2009.5459405},
   Key = {fds264859}
}

@article{fds264865,
   Author = {Bar, L and Sapiro, G},
   Title = {Generalized newton-type methods for energy formulations in
             image processing},
   Journal = {SIAM Journal on Imaging Sciences},
   Volume = {2},
   Number = {2},
   Pages = {508-531},
   Publisher = {Society for Industrial & Applied Mathematics
             (SIAM)},
   Year = {2009},
   Month = {January},
   url = {http://dx.doi.org/10.1137/080722436},
   Abstract = {Many problems in image processing are addressed via the
             minimization of a cost functional. The most prominently used
             optimization technique is gradient-descent, often used due
             to its simplicity and applicability where other techniques,
             e.g., those coming from discrete optimization, cannot be
             applied. Yet, gradient-descent suffers from slow
             convergence and often reaches only local minima, which
             depend strongly on the initialization and on the condition
             number of the functional's Hessian. Newton-type methods, on
             the other hand,
             are known to have a faster, quadratic convergence. In its
             classical form, the Newton method relies on the L2-type norm
             to define the descent direction. In this paper, we
             generalize and reformulate this very important optimization
             method by introducing Newton-type methods based on more
             general norms. Such norms are introduced both in the descent
             computation (Newton step) and in the corresponding
             stabilizing trust-region. This generalization opens up new
             possibilities in the extraction of the Newton step,
             including benefits such as mathematical stability and the
             incorporation of smoothness constraints. We first present
             the derivation of the modified Newton step in the calculus
             of variation framework needed for image processing. Then, we
             demonstrate the method with two common objective
             functionals: variational image deblurring and geometric
             active contours for image segmentation. We show that in
             addition to the fast convergence, norms adapted to the
             problem at hand yield different and superior
             results.},
   Doi = {10.1137/080722436},
   Key = {fds264865}
}
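
For orientation, the object being generalized is the classical trust-region Newton step,

    \[
      h \;=\; \arg\min_{\|h\|\le\delta}\;
      \langle \nabla E(u),\, h\rangle
      \;+\; \tfrac12\,\langle \nabla^2 E(u)\,h,\; h\rangle ,
    \]

where both the pairing and the trust-region ball are classically taken in L2. The paper's point is that choosing these norms differently, for example with smoothness-inducing Sobolev-type norms, changes which step is extracted and is where the stability and smoothness benefits enter. (The display is the standard trust-region model, not an equation quoted from the paper.)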

@article{fds264868,
   Author = {Aganj, I and Lenglet, C and Sapiro, G and Yacoub, E and Ugurbil, K and Harel, N},
   Title = {Multiple Q-shell ODF reconstruction in Q-ball
             imaging.},
   Journal = {Medical image computing and computer-assisted intervention :
             MICCAI ... International Conference on Medical Image
             Computing and Computer-Assisted Intervention},
   Volume = {12},
   Number = {Pt 2},
   Pages = {423-431},
   Year = {2009},
   Month = {January},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/20426140},
   Abstract = {Q-ball imaging (QBI) is a high angular resolution diffusion
             imaging (HARDI) technique which has been proven very
             successful in resolving multiple intravoxel fiber
             orientations in MR images. The standard computation of the
             orientation distribution function (ODF, the probability of
             diffusion in a given direction) from q-ball uses linear
             radial projection, neglecting the change in the volume
             element along the ray, thereby resulting in distributions
             different from the true ODFs. A new technique has been
             recently proposed that, by considering the solid angle
             factor, uses the mathematically correct definition of the
             ODF and results in a dimensionless and normalized ODF
             expression from a single q-shell. In this paper, we extend
             this technique in order to exploit HARDI data from multiple
             q-shells. We consider the more flexible multi-exponential
             model for the diffusion signal, and show how to efficiently
             compute the ODFs in constant solid angle. We describe our
             method and demonstrate its improved performance on both
             artificial and real HARDI data.},
   Doi = {10.1007/978-3-642-04271-3_52},
   Key = {fds264868}
}

@article{fds265038,
   Author = {Mahmoudi, M and Sapiro, G},
   Title = {Three-dimensional point cloud recognition via distributions
             of geometric distances},
   Journal = {Graphical Models},
   Volume = {71},
   Number = {1},
   Pages = {22-31},
   Publisher = {Elsevier BV},
   Year = {2009},
   Month = {January},
   ISSN = {1524-0703},
   url = {http://dx.doi.org/10.1016/j.gmod.2008.10.002},
   Abstract = {A geometric framework for the recognition of
             three-dimensional objects represented by point clouds is
             introduced in this paper. The proposed approach is based on
             comparing distributions of intrinsic measurements on the
             point cloud. In particular, intrinsic distances are
             exploited as signatures for representing the point clouds.
             The first signature we introduce is the histogram of
             pairwise diffusion distances between all points on the shape
             surface. These distances represent the probability of
             traveling from one point to another in a fixed number of
             random steps, the average intrinsic distances of all
             possible paths of a given number of steps between the two
             points. This signature is augmented by the histogram of the
             actual pairwise geodesic distances in the point cloud, the
             distribution of the ratio between these two distances, as
             well as the distribution of the number of times each point
             lies on the shortest paths between other points. These
             signatures are not only geometric but also invariant to
             bends. We further augment these signatures by the
             distribution of a curvature function and the distribution of
             a curvature weighted distance. These histograms are compared
             using the χ2 or other common distance metrics for
             distributions. The presentation of the framework is
             accompanied by theoretical and geometric justification and
             state-of-the-art experimental results with the standard
             Princeton 3D shape benchmark, ISDB, and nonrigid 3D
             datasets. We also present a detailed analysis of the
             particular relevance of each one of the different proposed
             histogram-based signatures. Finally, we briefly discuss a
             more local approach where the histograms are computed for a
             number of overlapping patches from the object rather than
             the whole shape, thereby opening the door to partial shape
             comparisons. © 2008 Elsevier Inc. All rights
             reserved.},
   Doi = {10.1016/j.gmod.2008.10.002},
   Key = {fds265038}
}
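
The signature-and-comparison pipeline is easy to sketch: build a normalized histogram of whichever intrinsic pairwise distances are available (geodesic, diffusion, curvature-weighted, ...) and compare histograms with the chi-squared distance. The bin count and range below are arbitrary illustrative choices.

    import numpy as np

    def distance_histogram(D, bins=64, r_max=None):
        """Normalized histogram of the pairwise distances in the
        symmetric matrix D; the bend-invariant shape signature."""
        d = D[np.triu_indices_from(D, k=1)]
        r_max = d.max() if r_max is None else r_max
        h, _ = np.histogram(d, bins=bins, range=(0.0, r_max))
        return h / h.sum()

    def chi2_distance(h1, h2, eps=1e-12):
        """Chi-squared distance between two histogram signatures."""
        return 0.5 * float(np.sum((h1 - h2) ** 2 / (h1 + h2 + eps)))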

@article{fds265050,
   Author = {Mairal, J and Bach, F and Ponce, J and Sapiro, G},
   Title = {Online dictionary learning for sparse coding},
   Journal = {Proceedings of the 26th International Conference On Machine
             Learning, ICML 2009},
   Pages = {689-696},
   Year = {2009},
   Month = {January},
   Abstract = {Sparse coding - that is, modelling data vectors as sparse
             linear combinations of basis elements - is widely used in
             machine learning, neuroscience, signal processing, and
             statistics. This paper focuses on learning the basis set,
             also called dictionary, to adapt it to specific data, an
             approach that has recently proven to be very effective for
             signal reconstruction and classification in the audio and
             image processing domains. This paper proposes a new online
             optimization algorithm for dictionary learning, based on
             stochastic approximations, which scales up gracefully to
             large datasets with millions of training samples. A proof of
             convergence is presented, along with experiments with
             natural images demonstrating that it leads to faster
             performance and better dictionaries than classical batch
             algorithms for both small and large datasets.},
   Key = {fds265050}
}

@article{fds265053,
   Author = {Lecumberry, F and Pardo, A and Sapiro, G},
   Title = {Multiple shape models for simultaneous object classification
             and segmentation},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Pages = {3001-3004},
   Publisher = {IEEE},
   Year = {2009},
   Month = {January},
   ISSN = {1522-4880},
   url = {http://dx.doi.org/10.1109/ICIP.2009.5414596},
   Abstract = {Shape models (SMs), capturing the common features of a set
             of training shapes, represent a new incoming object based on
             its projection onto the corresponding model. Given a set of
             learned SMs representing different objects, and an image
             with a new shape, this work introduces a joint
             classification-segmentation framework with a twofold goal.
             First, to automatically select the SM that best represents
             the object, and second, to accurately segment the image
             taking into account both the image information and the
             features and variations learned from the on-line selected
             model. A new energy functional is introduced that
             simultaneously accomplishes both goals. Model selection is
             performed based on a shape similarity measure, determining
             which model to use at each iteration of the steepest descent
             minimization, allowing for model switching and adaptation to
             the data. High-order SMs are used in order to deal with very
             similar object classes and natural variability within them.
             The presentation of the framework is complemented with
             examples for the difficult task of simultaneously
             classifying and segmenting closely related shapes, stages of
             human activities, in images with severe occlusions. ©2009
             IEEE.},
   Doi = {10.1109/ICIP.2009.5414596},
   Key = {fds265053}
}

@article{fds265054,
   Author = {Mairal, J and Bach, F and Ponce, J and Sapiro, G and Zisserman,
             A},
   Title = {Supervised dictionary learning},
   Journal = {Advances in Neural Information Processing Systems 21 -
             Proceedings of the 2008 Conference},
   Pages = {1033-1040},
   Year = {2009},
   Month = {January},
   Abstract = {It is now well established that sparse signal models are
             well suited for restoration tasks and can be effectively
             learned from audio, image, and video data. Recent research
             has been aimed at learning discriminative sparse models
             instead of purely reconstructive ones. This paper proposes a
             new step in that direction, with a novel sparse
             representation for signals belonging to different classes in
             terms of a shared dictionary and discriminative class
             models. The linear version of the proposed model admits a
             simple probabilistic interpretation, while its most general
             variant admits an interpretation in terms of kernels. An
             optimization framework for learning all the components of
             the proposed model is presented, along with experimental
             results on standard handwritten digit and texture
             classification tasks.},
   Key = {fds265054}
}

@article{fds265124,
   Author = {Zhou, M and Chen, H and Paisley, J and Ren, L and Sapiro, G and Carin,
             L},
   Title = {Non-parametric Bayesian dictionary learning for sparse image
             representations},
   Journal = {Advances in Neural Information Processing Systems 22 -
             Proceedings of the 2009 Conference},
   Pages = {2295-2303},
   Year = {2009},
   Month = {January},
   Abstract = {Non-parametric Bayesian techniques are considered for
             learning dictionaries for sparse image representations, with
             applications in denoising, inpainting and compressive
             sensing (CS). The beta process is employed as a prior for
             learning the dictionary, and this non-parametric method
             naturally infers an appropriate dictionary size. The
             Dirichlet process and a probit stick-breaking process are
             also considered to exploit structure within an image. The
             proposed method can learn a sparse dictionary in situ;
             training images may be exploited if available, but they are
             not required. Further, the noise variance need not be known,
             and can be non-stationary. Another virtue of the proposed
             method is that sequential inference can be readily employed,
             thereby allowing scaling to large images. Several example
             results are presented, using both Gibbs and variational
             Bayesian inference, with comparisons to other
             state-of-the-art approaches.},
   Key = {fds265124}
}

@article{fds264752,
   Author = {Matonti, F and Sapiro, G},
   Title = {The commitment of intellectuals: new viewpoints},
   Journal = {ACTES DE LA RECHERCHE EN SCIENCES SOCIALES},
   Number = {176-77},
   Pages = {4-7},
   Year = {2009},
   ISSN = {0335-5322},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000265109700001&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264752}
}

@article{fds264782,
   Author = {Sapiro, G},
   Title = {Intellectuals and politics: A Typology},
   Journal = {ACTES DE LA RECHERCHE EN SCIENCES SOCIALES},
   Number = {176-77},
   Pages = {8-+},
   Year = {2009},
   ISSN = {0335-5322},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000265109700002&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264782}
}

@article{fds265029,
   Author = {Bar, L and Sapiro, G},
   Title = {Generalized newton methods for energy formulations in image
             processing},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Pages = {809-812},
   Publisher = {IEEE},
   Year = {2008},
   Month = {December},
   ISSN = {1522-4880},
   url = {http://dx.doi.org/10.1109/ICIP.2008.4711878},
   Abstract = {Many problems in image processing are solved via the
             minimization of a cost functional. The most widely used
             optimization technique is the gradient descent, often used
             due to its simplicity and applicability where other
             optimization techniques, e.g., those coming from discrete
             optimization, cannot be used. Yet, gradient descent
             suffers from slow convergence and often reaches only local
             minima, which depend strongly on the condition number of
             the functional's Hessian. Newton-type methods, on the other hand,
             are known to have a rapid (quadratic) convergence. In its
             classical form, the Newton method relies on the L2-type norm
             to define the descent direction. In this paper, we
             generalize and reformulate this very important optimization
             method by introducing a novel Newton method based on general
             norms. This generalization opens up new possibilities in the
             extraction of the Newton step, including benefits such as
             mathematical stability and smoothness constraints. We first
             present the derivation of the modified Newton step in the
             calculus of variation framework. Then we demonstrate the
             method with two common objective functionals: variational
             image deblurring and geodesic active contours. We show that
             in addition to the fast convergence, different norm
             selections yield different and superior results. © 2008
             IEEE.},
   Doi = {10.1109/ICIP.2008.4711878},
   Key = {fds265029}
}

@article{fds265039,
   Author = {Haro, G and Randall, G and Sapiro, G},
   Title = {Translated poisson mixture model for stratification
             learning},
   Journal = {International Journal of Computer Vision},
   Volume = {80},
   Number = {3},
   Pages = {358-374},
   Publisher = {Springer Nature},
   Year = {2008},
   Month = {December},
   ISSN = {0920-5691},
   url = {http://dx.doi.org/10.1007/s11263-008-0144-6},
   Abstract = {A framework for the regularized and robust estimation of
             non-uniform dimensionality and density in high dimensional
             noisy data is introduced in this work. This leads to
             learning stratifications, that is, mixture of manifolds
             representing different characteristics and complexities in
             the data set. The basic idea relies on modeling the high
             dimensional sample points as a process of translated Poisson
             mixtures, with regularizing restrictions, leading to a model
             which includes the presence of noise. The translated Poisson
             distribution is useful to model a noisy counting process,
             and it is derived from the noise-induced translation of a
             regular Poisson distribution. By maximizing the
             log-likelihood of the process counting the points falling
             into a local ball, we estimate the local dimension and
             density. We show that the sequence of all possible local
             countings in a point cloud formed by samples of a
             stratification can be modeled by a mixture of different
             translated Poisson distributions, thus allowing the presence
             of mixed dimensionality and densities in the same data set.
             With this statistical model, the parameters which best
             describe the data, estimated via expectation maximization,
             divide the points in different classes according to both
             dimensionality and density, together with an estimation of
             these quantities for each class. Theoretical asymptotic
             results for the model are presented as well. The
             presentation of the theoretical framework is complemented
             with artificial and real examples showing the importance of
             regularized stratification learning in high dimensional data
             analysis in general and computer vision and image analysis
             in particular. © 2008 Springer Science+Business Media,
             LLC.},
   Doi = {10.1007/s11263-008-0144-6},
   Key = {fds265039}
}
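
The counting idea underneath the model can be sketched through its simplest noise-free relative: the Poisson-process maximum-likelihood estimate of local intrinsic dimension from nearest-neighbour distances (Levina-Bickel style). The paper's actual contribution, translated-Poisson mixtures with regularization across points, is layered on top of this and is not reproduced here.

    import numpy as np

    def local_dimension_mle(X, k=10):
        """Per-point ML estimate of intrinsic dimension from the k
        nearest-neighbour distances, derived from a Poisson model of
        the process counting points falling into a growing ball."""
        D = np.linalg.norm(X[:, None, :] - X[None, :, :], axis=-1)
        D.sort(axis=1)
        T = D[:, 1:k + 1]                     # k nearest-neighbour distances
        logs = np.log(T[:, -1:] / T[:, :-1])  # log(T_k / T_j), j < k
        return (k - 1) / logs.sum(axis=1)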

@article{fds265037,
   Author = {Narasimha, R and Aganj, I and Bennett, AE and Borgnia, MJ and Zabransky,
             D and Sapiro, G and McLaughlin, SW and Milne, JLS and Subramaniam,
             S},
   Title = {Evaluation of denoising algorithms for biological electron
             tomography.},
   Journal = {J Struct Biol},
   Volume = {164},
   Number = {1},
   Pages = {7-17},
   Year = {2008},
   Month = {October},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18585059},
   Abstract = {Tomograms of biological specimens derived using transmission
             electron microscopy can be intrinsically noisy due to the
             use of low electron doses, the presence of a "missing wedge"
             in most data collection schemes, and inaccuracies arising
             during 3D volume reconstruction. Before tomograms can be
             interpreted reliably, for example, by 3D segmentation, it is
             essential that the data be suitably denoised using
             procedures that can be individually optimized for specific
             data sets. Here, we implement a systematic procedure to
             compare various nonlinear denoising techniques on tomograms
             recorded at room temperature and at cryogenic temperatures,
             and establish quantitative criteria to select a denoising
             approach that is most relevant for a given tomogram. We
             demonstrate that using an appropriate denoising algorithm
             facilitates robust segmentation of tomograms of HIV-infected
             macrophages and Bdellovibrio bacteria obtained from
             specimens at room and cryogenic temperatures, respectively.
             We validate this strategy of automated segmentation of
             optimally denoised tomograms by comparing its performance
             with manual extraction of key features from the same
             tomograms.},
   Doi = {10.1016/j.jsb.2008.04.006},
   Key = {fds265037}
}

@article{fds265027,
   Author = {Mairal, J and Bach, F and Ponce, J and Sapiro, G and Zisserman,
             A},
   Title = {Discriminative learned dictionaries for local image
             analysis},
   Journal = {26th IEEE Conference on Computer Vision and Pattern
             Recognition, CVPR},
   Publisher = {IEEE},
   Year = {2008},
   Month = {September},
   url = {http://dx.doi.org/10.1109/CVPR.2008.4587652},
   Abstract = {Sparse signal models have been the focus of much recent
             research, leading to (or improving upon) state-of-the-art
             results in signal, image, and video restoration. This
             article extends this line of research into a novel framework
             for local image discrimination tasks, proposing an energy
             formulation with both sparse reconstruction and class
             discrimination components, jointly optimized during
             dictionary learning. This approach improves over the state
             of the art in texture segmentation experiments using the
             Brodatz database, and it paves the way for a novel scene
             analysis and recognition framework based on simultaneously
             learning discriminative and reconstructive dictionaries.
             Preliminary results in this direction using examples from
             the Pascal VOC06 and Graz02 datasets are presented as well.
             ©2008 IEEE.},
   Doi = {10.1109/CVPR.2008.4587652},
   Key = {fds265027}
}

@article{fds265025,
   Author = {Rother, D and Williams, L and Sapiro, G},
   Title = {Super-resolution texturing for online virtual
             globes},
   Journal = {2008 IEEE Computer Society Conference on Computer Vision and
             Pattern Recognition Workshops, CVPR Workshops},
   Publisher = {IEEE},
   Year = {2008},
   Month = {September},
   url = {http://dx.doi.org/10.1109/CVPRW.2008.4562961},
   Abstract = {Online virtual globe applications such as Google Earth and
             Maps, Microsoft Virtual Earth, and Yahoo! Maps, allow users
             to explore realistic models of the Earth. To provide the
             ground-level detail of interest to users, it is necessary to
             serve and render high resolution images. For planetary
             coverage at high resolution, a very large number of images
             need to be acquired, stored, and transmitted, with
             consequent high costs and difficulty for the application
             provider, often resulting in lower than expected
             performance. In this work we propose a supplementary
             approach to render appropriate visual information in these
             applications. Using super-resolution techniques based on the
             combination and extension of known texture transfer and
             synthesis algorithms, we develop a system to efficiently
             synthesize fine detail consistent with the textures served.
             This approach dramatically reduces the operational cost of
             virtual globe displays, which are among the most
             image-intensive applications on the Internet, while at the
             same time improving their appearance. The proposed framework
             is fast and preserves the coherence between corresponding
             images at different resolutions, allowing consistent and
             responsive interactive zooming and panning operations. The
             framework is capable of adapting a library of multiscale
             textures to pre-segmented regions in the highest-resolution
             texture maps available. We also describe a simple interface
             to obtain class label information from contributing users.
             The presentation of the constituent techniques is
             complemented with examples simulating our framework embedded
             in Google Earth. © 2008 IEEE.},
   Doi = {10.1109/CVPRW.2008.4562961},
   Key = {fds265025}
}

@article{fds265026,
   Author = {Mahmoudi, M and Sapiro, G},
   Title = {Three-dimensional point cloud recognition via distributions
             of geometric distances},
   Journal = {2008 IEEE Computer Society Conference on Computer Vision and
             Pattern Recognition Workshops, CVPR Workshops},
   Publisher = {IEEE},
   Year = {2008},
   Month = {September},
   url = {http://dx.doi.org/10.1109/CVPRW.2008.4563031},
   Abstract = {A geometric framework for the recognition of
             three-dimensional objects represented by point clouds is
             introduced in this paper. The proposed approach is based on
             comparing distributions of intrinsic measurements on the
             point cloud. In particular, intrinsic distances are
             exploited as signatures for representing the point clouds.
             The first signature we introduce is the histogram of
             pairwise diffusion distances between all points on the shape
             surface. These distances represent the probability of
             traveling from one point to another in a fixed number of
             random steps, the average intrinsic distances of all
             possible paths of a given number of steps between the two
             points. This signature is augmented by the histogram of the
             actual pairwise geodesic distances, as well as the
             distribution of the ratio between these two distances. These
             signatures are not only geometric but also invariant to
             bends. We further augment these signatures by the
             distribution of a curvature function and the distribution of
             a curvature weighted distance. These histograms are compared
             using the χ2 or other common distance metrics for
             distributions. The presentation of the framework is
             accompanied by theoretical justification and
             state-of-the-art experimental results with the standard
             Princeton 3D shape benchmark and ISDB datasets, as well as a
             detailed analysis of the particular relevance of each one of
             the different histogram-based signatures. Finally, we
             briefly discuss a more local approach where the histograms
             are computed for a number of overlapping patches from the
             object rather than the whole shape, thereby opening the door
             to partial shape comparisons. © 2008 IEEE.},
   Doi = {10.1109/CVPRW.2008.4563031},
   Key = {fds265026}
}

@article{fds265028,
   Author = {Rother, D and Patwardhan, K and Aganj, I and Sapiro,
             G},
   Title = {3D priors for scene learning from a single
             view},
   Journal = {2008 IEEE Computer Society Conference on Computer Vision and
             Pattern Recognition Workshops, CVPR Workshops},
   Publisher = {IEEE},
   Year = {2008},
   Month = {September},
   url = {http://dx.doi.org/10.1109/CVPRW.2008.4563034},
   Abstract = {A framework for scene learning from a single still video
             camera is presented in this work. In particular, the camera
             transformation and the direction of the shadows are learned
             using information extracted from pedestrians walking in the
             scene. The proposed approach poses the scene learning
             estimation as a likelihood maximization problem, efficiently
             solved via factorization and dynamic programming, and
             amenable to an online implementation. We introduce a 3D
             prior to model the pedestrian's appearance from any
             viewpoint, and learn it using a standard off-the-shelf
             consumer video camera and the Radon transform. This 3D prior
             or "appearance model" is used to quantify the agreement
             between the tentative parameters and the actual video
             observations, taking into account not only the pixels
             occupied by the pedestrian, but also those occupied by
             his shadows and/or reflections. The presentation of the
             framework is complemented with an example of a casual video
             scene showing the importance of the learned 3D pedestrian
             prior and the accuracy of the proposed approach. © 2008
             IEEE.},
   Doi = {10.1109/CVPRW.2008.4563034},
   Key = {fds265028}
}

@article{fds265021,
   Author = {Liao, HY and Sapiro, G},
   Title = {Sparse representations for limited data tomography},
   Journal = {2008 5th IEEE International Symposium on Biomedical Imaging:
             From Nano to Macro, Proceedings, ISBI},
   Pages = {1375-1378},
   Publisher = {IEEE},
   Year = {2008},
   Month = {September},
   url = {http://dx.doi.org/10.1109/ISBI.2008.4541261},
   Abstract = {In limited data tomography, with applications such as
             electron microscopy and medical imaging, the scanning views
             are within an angular range that is often both limited and
             sparsely sampled. In these situations, standard algorithms
             produce reconstructions with notorious artifacts. We show in
             this paper that a sparse image representation principle,
             based on learning dictionaries for sparse representations of
             image patches, leads to significantly improved
             reconstructions of the unknown density from its limited
             angle projections. The presentation of the underlying
             framework is complemented with illustrative results on
             artificial and real data. ©2008 IEEE.},
   Doi = {10.1109/ISBI.2008.4541261},
   Key = {fds265021}
}
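
%% Note: the sparsity principle above rests on coding each image patch
%% against a learned dictionary. A hedged sketch of one such coding step,
%% orthogonal matching pursuit, in Python/NumPy; a random dictionary stands
%% in for the learned one, and all names and sizes are assumptions.

import numpy as np

def omp(D, y, k):
    # Greedily select k atoms of D (unit-norm columns) to approximate y.
    residual, support = y.copy(), []
    for _ in range(k):
        support.append(int(np.argmax(np.abs(D.T @ residual))))
        atoms = D[:, support]
        coef, *_ = np.linalg.lstsq(atoms, y, rcond=None)
        residual = y - atoms @ coef
    x = np.zeros(D.shape[1])
    x[support] = coef
    return x

rng = np.random.default_rng(0)
D = rng.standard_normal((64, 256))
D /= np.linalg.norm(D, axis=0)                 # unit-norm atoms
y = rng.standard_normal(64)                    # one vectorized 8x8 patch
print(np.linalg.norm(y - D @ omp(D, y, k=5)))  # residual after sparse coding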

@article{fds265023,
   Author = {Aganj, I and Sapiro, G and Parikshak, N and Madsen, SK and Thompson,
             PM},
   Title = {Segmentation-free measurement of cortical thickness from
             MRI},
   Journal = {2008 5th IEEE International Symposium on Biomedical Imaging:
             From Nano to Macro, Proceedings, ISBI},
   Pages = {1625-1628},
   Publisher = {IEEE},
   Year = {2008},
   Month = {September},
   url = {http://dx.doi.org/10.1109/ISBI.2008.4541324},
   Abstract = {Estimating the thickness of cerebral cortex is a key step in
             many MR brain imaging studies, revealing valuable
             information on development or disease progression. In this
             work we present a new approach to measure the cortical
             thickness, based on minimizing line integrals over the
             probability map of the gray matter in the MRI volume.
             Previous methods often perform a binary-valued segmentation
             of the gray matter before measuring the thickness. Because
             of image noise and partial voluming, such a hard
             classification ignores the underlying class probabilities
             assigned to each voxel, discarding potentially useful
             information. We describe our proposed method and demonstrate
             its performance on both artificial volumes and real 3D brain
             MRI data from subjects with Alzheimer's disease and healthy
             individuals. ©2008 IEEE.},
   Doi = {10.1109/ISBI.2008.4541324},
   Key = {fds265023}
}
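
%% Note: the key idea above, integrating the gray-matter probability along a
%% line rather than thresholding it first, can be sketched in a few lines.
%% A toy Python/NumPy version on a synthetic probability map; the
%% nearest-neighbor sampling and all names are illustrative, not the
%% authors' implementation.

import numpy as np

def thickness_along_line(prob_map, start, direction, step=0.1, n_steps=400):
    # Accumulate p(gray matter) * arc length along the ray; soft voxel
    # probabilities contribute fractionally, unlike a hard segmentation.
    direction = np.asarray(direction, float)
    direction /= np.linalg.norm(direction)
    total, pos = 0.0, np.asarray(start, float)
    for _ in range(n_steps):
        idx = tuple(np.round(pos).astype(int))
        if all(0 <= i < s for i, s in zip(idx, prob_map.shape)):
            total += prob_map[idx] * step
        pos = pos + direction * step
    return total

p = np.zeros((32, 32, 32))
p[:, :, 10:15] = 1.0  # a slab of "gray matter" five voxels thick
print(thickness_along_line(p, start=(16, 16, 0), direction=(0, 0, 1)))  # ~5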

@article{fds265024,
   Author = {Haro, G and Lenglet, C and Sapiro, G and Thompson,
             P},
   Title = {On the non-uniform complexity of brain connectivity},
   Journal = {2008 5th IEEE International Symposium on Biomedical Imaging:
             From Nano to Macro, Proceedings, ISBI},
   Pages = {887-890},
   Publisher = {IEEE},
   Year = {2008},
   Month = {September},
   url = {http://dx.doi.org/10.1109/ISBI.2008.4541139},
   Abstract = {A stratification and manifold learning approach for
             analyzing High Angular Resolution Diffusion Imaging (HARDI)
             data is introduced in this paper. HARDI data provides
             high-dimensional signals measuring the complex
             microstructure of biological tissues, such as the cerebral
             white matter. We show that these high-dimensional spaces may
             be understood as unions of manifolds of varying
             dimensions/complexity and densities. With such analysis, we
             use clustering to characterize the structural complexity of
             the white matter. We briefly present the underlying
             framework and numerical experiments illustrating this
             original and promising approach. ©2008 IEEE.},
   Doi = {10.1109/ISBI.2008.4541139},
   Key = {fds265024}
}
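
%% Note: the non-uniform dimensionality studied above can be illustrated
%% with a simple local estimator. The sketch below uses a k-nearest-neighbor
%% maximum-likelihood dimension estimate in the spirit of Levina and Bickel,
%% which is related to, but not identical with, the clustering framework of
%% the paper.

import numpy as np

def local_dimension(points, k=10):
    # For each point, d_hat = 1 / mean_j log(r_k / r_j), where r_j is the
    # distance to the j-th nearest neighbor (j = 1..k-1).
    diff = points[:, None, :] - points[None, :, :]
    dists = np.sort(np.sqrt((diff ** 2).sum(-1)), axis=1)[:, 1:k + 1]
    return 1.0 / np.mean(np.log(dists[:, -1:] / dists[:, :-1]), axis=1)

rng = np.random.default_rng(0)
curve = np.c_[np.linspace(0, 1, 300), np.zeros(300), np.zeros(300)]  # 1-D set
plane = np.c_[rng.random((300, 2)), np.zeros(300)]                   # 2-D set
print(local_dimension(curve).mean(), local_dimension(plane).mean())  # ~1, ~2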

@article{fds265036,
   Author = {Liu, J and Bartesaghi, A and Borgnia, MJ and Sapiro, G and Subramaniam,
             S},
   Title = {Molecular architecture of native HIV-1 gp120
             trimers.},
   Journal = {Nature},
   Volume = {455},
   Number = {7209},
   Pages = {109-113},
   Year = {2008},
   Month = {September},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18668044},
   Abstract = {The envelope glycoproteins (Env) of human and simian
             immunodeficiency viruses (HIV and SIV, respectively) mediate
             virus binding to the cell surface receptor CD4 on target
             cells to initiate infection. Env is a heterodimer of a
             transmembrane glycoprotein (gp41) and a surface glycoprotein
             (gp120), and forms trimers on the surface of the viral
             membrane. Using cryo-electron tomography combined with
             three-dimensional image classification and averaging, we
             report the three-dimensional structures of trimeric Env
             displayed on native HIV-1 in the unliganded state, in
             complex with the broadly neutralizing antibody b12 and in a
             ternary complex with CD4 and the 17b antibody. By fitting
             the known crystal structures of the monomeric gp120 core in
             the b12- and CD4/17b-bound conformations into the density
             maps derived by electron tomography, we derive molecular
             models for the native HIV-1 gp120 trimer in unliganded and
             CD4-bound states. We demonstrate that CD4 binding results in
             a major reorganization of the Env trimer, causing an outward
             rotation and displacement of each gp120 monomer. This
             appears to be coupled with a rearrangement of the gp41
             region along the central axis of the trimer, leading to
             closer contact between the viral and target cell membranes.
             Our findings elucidate the structure and conformational
             changes of trimeric HIV-1 gp120 relevant to antibody
             neutralization and attachment to target cells.},
   Doi = {10.1038/nature07159},
   Key = {fds265036}
}

@article{fds265035,
   Author = {Caselles, V and Haro, G and Sapiro, G and Verdera,
             J},
   Title = {On geometric variational models for inpainting surface
             holes},
   Journal = {Computer Vision and Image Understanding},
   Volume = {111},
   Number = {3},
   Pages = {351-373},
   Publisher = {Elsevier BV},
   Year = {2008},
   Month = {September},
   ISSN = {1077-3142},
   url = {http://dx.doi.org/10.1016/j.cviu.2008.01.002},
   Abstract = {Geometric approaches for filling-in surface holes are
             introduced and studied in this paper. The basic principle is
             to choose the completing surface as one which minimizes a
             power of the mean curvature. We interpret this principle in
             a level set formulation, that is, we represent the surface
             of interest in implicit form and we construct an energy
             functional for the embedding function u. We first explore
             two different formulations (which can be considered as
             alternatives) inspired by the above principle: in the first
             one we write the mean curvature as the divergence of the
             normal vector field θ to the isosurfaces of u; in the
             second one we use the signed distance function D to the
             surface as the embedding function and we write the mean
             curvature in terms of it. Then we solve the Euler-Lagrange
             equations of these functionals which consist of a system of
             second order partial differential equations (PDEs) for u and
             θ, in the first case, or a fourth order PDE for D in the
             second case. Then, simpler methods based on second order
             elliptic PDEs, like Laplace equation or the absolutely
             minimizing Lipschitz extension, are also proposed and
             compared with the above higher order methods. The
             theoretical and computational framework, as well as examples
             with synthetic and real data, are presented in this paper.
             © 2008 Elsevier Inc. All rights reserved.},
   Doi = {10.1016/j.cviu.2008.01.002},
   Key = {fds265035}
}
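
%% Note: the "simpler methods based on second order elliptic PDEs" mentioned
%% above admit a very short illustration: harmonic filling of a hole by
%% iterating the Laplace equation. A toy 2-D height-field sketch in
%% Python/NumPy; grid size, hole location, and iteration count are arbitrary.

import numpy as np

def laplace_fill(values, hole_mask, n_iter=2000):
    u = values.copy()
    u[hole_mask] = values[~hole_mask].mean()  # rough initialization
    for _ in range(n_iter):
        # Jacobi step: average of the four neighbors, applied only in the hole.
        avg = 0.25 * (np.roll(u, 1, 0) + np.roll(u, -1, 0)
                      + np.roll(u, 1, 1) + np.roll(u, -1, 1))
        u[hole_mask] = avg[hole_mask]
    return u

z = np.fromfunction(lambda i, j: 0.1 * i + 0.05 * j, (40, 40))
mask = np.zeros_like(z, bool)
mask[15:25, 15:25] = True              # square hole in a planar height field
filled = laplace_fill(z, mask)
print(np.abs(filled - z)[mask].max())  # planar data is recovered ~exactly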

@article{fds265033,
   Author = {Duarte-Carvajalino, JM and Sapiro, G and Vélez-Reyes,
             M and Castillo, PE},
   Title = {Multiscale representation and segmentation of hyperspectral
             imagery using geometric partial differential equations and
             algebraic multigrid methods},
   Journal = {IEEE Transactions on Geoscience and Remote
             Sensing},
   Volume = {46},
   Number = {8},
   Pages = {2418-2434},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2008},
   Month = {August},
   ISSN = {0196-2892},
   url = {http://dx.doi.org/10.1109/TGRS.2008.916478},
   Abstract = {A fast algorithm for multiscale representation and
             segmentation of hyperspectral imagery is introduced in this
             paper. The multiscale/scale-space representation is obtained
             by solving a nonlinear diffusion partial differential
             equation (PDE) for vector-valued images. We use algebraic
             multigrid techniques to obtain a fast and scalable solution
             of the PDE and to segment the hyperspectral image following
             the intrinsic multigrid structure. We test our algorithm on
             four standard hyperspectral images that represent different
             environments commonly found in remote sensing applications:
             agricultural, urban, mining, and marine. The experimental
             results show that the segmented images lead to better
             classification than using the original data directly, in
             spite of the use of simple similarity metrics and piecewise
             constant approximations obtained from the segmentation maps.
             © 2006 IEEE.},
   Doi = {10.1109/TGRS.2008.916478},
   Key = {fds265033}
}

@article{fds264792,
   Author = {Sapiro, G},
   Title = {Translation and the field of publishing},
   Journal = {Translation Studies},
   Volume = {1},
   Number = {2},
   Pages = {154-166},
   Publisher = {Informa UK Limited},
   Year = {2008},
   Month = {July},
   ISSN = {1478-1700},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000208082300002&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.1080/14781700802113473},
   Key = {fds264792}
}

@article{fds265020,
   Author = {Duarte-Carvajalino, JM and Sapiro, G and Velez-Reyes,
             M},
   Title = {Unsupervised spectral-spatial classification of
             hyperspectral imagery using real and complex features and
             generalized histograms},
   Journal = {Proceedings of SPIE - The International Society for Optical
             Engineering},
   Volume = {6966},
   Publisher = {SPIE},
   Year = {2008},
   Month = {June},
   ISSN = {0277-786X},
   url = {http://dx.doi.org/10.1117/12.779142},
   Abstract = {In this work, we study unsupervised classification
             algorithms for hyperspectral images based on band-by-band
             scalar histograms and vector-valued generalized histograms,
             obtained by vector quantization. The corresponding
             histograms are compared by dissimilarity metrics such as the
             chi-square, Kolmogorov-Smirnov, and earth mover's
             distances. The histograms are constructed from homogeneous
             regions in the images identified by a pre-segmentation
             algorithm and distance metrics between pixels. We compare
             the traditional spectral-only segmentation algorithms
             C-means and ISODATA, versus spectral-spatial segmentation
             algorithms such as unsupervised ECHO and a novel
             segmentation algorithm based on scale-space concepts. We
             also evaluate the use of complex features consisting of the
             real spectrum and its derivative as the imaginary part. The
             comparison between the different segmentation algorithms and
             distance metrics is based on their unsupervised
             classification accuracy using three real hyperspectral
             images with known ground truth.},
   Doi = {10.1117/12.779142},
   Key = {fds265020}
}

@article{fds265031,
   Author = {Bartesaghi, A and Sprechmann, P and Liu, J and Randall, G and Sapiro, G and Subramaniam, S},
   Title = {Classification and 3D averaging with missing wedge
             correction in biological electron tomography.},
   Journal = {Journal of structural biology},
   Volume = {162},
   Number = {3},
   Pages = {436-450},
   Year = {2008},
   Month = {June},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18440828},
   Abstract = {Strategies for the determination of 3D structures of
             biological macromolecules using electron crystallography and
             single-particle electron microscopy utilize powerful tools
             for the averaging of information obtained from 2D projection
             images of structurally homogeneous specimens. In contrast,
             electron tomographic approaches have often been used to
             study the 3D structures of heterogeneous, one-of-a-kind
             objects such as whole cells where image-averaging strategies
             are not applicable. Complex entities such as cells and
             viruses, nevertheless, contain multiple copies of numerous
             macromolecules that can individually be subjected to 3D
             averaging. Here we present a complete framework for
             alignment, classification, and averaging of volumes derived
             by electron tomography that is computationally efficient and
             effectively accounts for the missing wedge that is inherent
             to limited-angle electron tomography. Modeling the missing
             data as a multiplying mask in reciprocal space, we show that
             the effect of the missing wedge can be accounted for
             seamlessly in all alignment and classification operations.
             We solve the alignment problem using the convolution theorem
             in harmonic analysis, thus eliminating the need for
             approaches that require exhaustive angular search, and adopt
             an iterative approach to alignment and classification that
             does not require the use of external references. We
             demonstrate that our method can be successfully applied for
             3D classification and averaging of phantom volumes as well
             as experimentally obtained tomograms of GroEL where the
             outcomes of the analysis can be quantitatively compared
             against the expected results.},
   Doi = {10.1016/j.jsb.2008.02.008},
   Key = {fds265031}
}
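
%% Note: the convolution-theorem alignment described above scores all
%% translations at once with FFTs. A hedged 2-D sketch in Python/NumPy; the
%% paper operates on 3-D subtomograms and additionally models the missing
%% wedge as a reciprocal-space mask, which is omitted here.

import numpy as np

def best_shift(reference, volume):
    # Cross-correlation via the convolution theorem:
    # corr = IFFT( conj(FFT(reference)) * FFT(volume) ).
    corr = np.fft.ifftn(np.conj(np.fft.fftn(reference)) * np.fft.fftn(volume))
    return np.unravel_index(np.argmax(corr.real), corr.shape)

rng = np.random.default_rng(0)
ref = rng.standard_normal((64, 64))
shifted = np.roll(ref, shift=(5, -3), axis=(0, 1))  # known displacement
print(best_shift(ref, shifted))  # (5, 61): the shift (5, -3) modulo 64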

@article{fds265032,
   Author = {Singh, G and Memoli, F and Ishkhanov, T and Sapiro, G and Carlsson, G and Ringach, DL},
   Title = {Topological analysis of population activity in visual
             cortex.},
   Journal = {Journal of vision},
   Volume = {8},
   Number = {8},
   Pages = {11.1-18},
   Year = {2008},
   Month = {June},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18831634},
   Abstract = {Information in the cortex is thought to be represented by
             the joint activity of neurons. Here we describe how
             fundamental questions about neural representation can be
             cast in terms of the topological structure of population
             activity. A new method, based on the concept of persistent
             homology, is introduced and applied to the study of
             population activity in primary visual cortex (V1). We found
             that the topological structure of activity patterns when the
             cortex is spontaneously active is similar to those evoked by
             natural image stimulation and consistent with the topology
             of a two-sphere. We discuss how this structure could emerge
             from the functional organization of orientation and spatial
             frequency maps and their mutual relationship. Our findings
             extend prior results on the relationship between spontaneous
             and evoked activity in V1 and illustrate how computational
             topology can help tackle elementary questions about the
             representation of information in the nervous
             system.},
   Doi = {10.1167/8.8.11},
   Key = {fds265032}
}
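
%% Note: persistent homology, the tool used above, can be illustrated in its
%% simplest 0-dimensional form: tracking when connected components of a
%% point cloud merge as a distance threshold grows. A self-contained
%% union-find sketch in Python/NumPy; the paper uses higher-dimensional
%% homology, and this toy only shows the birth/death bookkeeping.

import numpy as np

def h0_deaths(points):
    d = np.sqrt(((points[:, None] - points[None, :]) ** 2).sum(-1))
    iu = np.triu_indices(len(points), 1)
    edges = sorted(zip(d[iu], iu[0], iu[1]))  # 0-dim Rips filtration
    parent = list(range(len(points)))
    def find(i):
        while parent[i] != i:
            parent[i] = parent[parent[i]]
            i = parent[i]
        return i
    deaths = []
    for w, i, j in edges:
        ri, rj = find(i), find(j)
        if ri != rj:           # two components merge: one H0 class
            parent[ri] = rj    # dies at threshold w
            deaths.append(w)
    return deaths              # every class is born at threshold 0

pts = np.concatenate([np.random.randn(30, 2), np.random.randn(30, 2) + 8])
print(max(h0_deaths(pts)))    # ~ the gap between the two clusters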

@article{fds265034,
   Author = {Liu, J and Bartesaghi, A and Borgnia, MJ and Sapiro, G and Subramaniam,
             S},
   Title = {Molecular Architecture of Native HIV-1 gp 120
             Trimers},
   Journal = {Chemtracts},
   Volume = {21},
   Number = {6},
   Pages = {227-228},
   Year = {2008},
   Month = {June},
   ISSN = {1431-9268},
   Abstract = {A critical step in human and simian immunodeficiency virus
             (HIV and SIV, respectively) pathogenesis is entry into the
             target cell. The process of infection is mediated by
             envelope glycoproteins, Env, which assemble in a trimeric
             form on the surface of the virus and bind exposed CD4
             molecules on target cells. A monomeric Env is composed of a
             transmembrane subunit, gp41, and a surface subunit, gp120.
             Although there are crystal structures of monomeric Env
             associated with Fab fragments from the neutralizing antibody
             b12 in the CD4-bound and free forms, the structural
             elucidation of the biologically relevant native trimeric
             state has been a challenge. In this study, the authors used
             cryoelectron tomography, in conjunction with the
             crystallographic information from monomers, to create a
             model of the trimeric HIV-1 gp120 with or without CD4 (Fig.
             1). Essentially, the strategy involved fitting of the
             crystal structure of the monomers into the density of the
             trimer obtained from cryoelectron tomography. From the
             ligand-bound and ligand-free molecular models, a schematic
             of the conformational changes that occur upon CD4 binding
             was presented (Fig. 1). Briefly, the trimer sticks out of
             the viral membrane, referred to as the spike, as anchored by
             gp41 (blue) and with each gp120 (red) having an exposed CD4
             binding site (orange). Upon binding of the ligand, CD4
             (yellow), the gp120 monomers undergo a conformational change
             to expose the V3 loop, shown as a green patch in Figure 1e.
             Such a conformational change orients the trimer for proper
             recognition of a chemokine receptor, as depicted in Figure
             1f. © 2008 Data Trace Publishing Company.},
   Key = {fds265034}
}

@article{fds265030,
   Author = {Patwardhan, KA and Sapiro, G and Morellas, V},
   Title = {Robust foreground detection in video using pixel
             layers.},
   Journal = {IEEE transactions on pattern analysis and machine
             intelligence},
   Volume = {30},
   Number = {4},
   Pages = {746-751},
   Year = {2008},
   Month = {April},
   ISSN = {0162-8828},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18276979},
   Abstract = {A framework for robust foreground detection that works under
             difficult conditions such as dynamic background and
             moderately moving camera is presented in this paper. The
             proposed method includes two main components: coarse scene
             representation as the union of pixel layers, and foreground
             detection in video by propagating these layers using a
             maximum-likelihood assignment. We first cluster into
             "layers" those pixels that share similar statistics. The
             entire scene is then modeled as the union of such
             non-parametric layer models. An incoming pixel is detected
             as foreground if it does not adhere to these adaptive models
             of the background. A principled way of computing thresholds
             is used to achieve robust detection performance with a
             pre-specified number of false alarms. Correlation between
             pixels in the spatial vicinity is exploited to deal with
             camera motion without precise registration or optical flow.
             The proposed technique adapts to changes in the scene, and
             allows persistent foreground objects to be automatically
             converted to background and re-converted to foreground when
             they become interesting. This simple framework addresses the
             important problem of robust foreground and unusual region
             detection, at about 10 frames per second on a standard
             laptop computer. The presentation of the proposed approach
             is complemented by results on challenging real data and
             comparisons with other standard techniques.},
   Doi = {10.1109/tpami.2007.70843},
   Key = {fds265030}
}

@article{fds264864,
   Author = {Sapiro, G},
   Title = {Message from the Editor-in-Chief},
   Journal = {SIAM Journal on Imaging Sciences},
   Volume = {1},
   Number = {1},
   Pages = {1-1},
   Publisher = {Society for Industrial & Applied Mathematics
             (SIAM)},
   Year = {2008},
   Month = {January},
   url = {http://dx.doi.org/10.1137/sjisbi000001000001000001000001},
   Doi = {10.1137/sjisbi000001000001000001000001},
   Key = {fds264864}
}

@article{fds265018,
   Author = {Rother, D and Sapiro, G and Pande, V},
   Title = {Statistical characterization of protein ensembles.},
   Journal = {IEEE/ACM transactions on computational biology and
             bioinformatics},
   Volume = {5},
   Number = {1},
   Pages = {42-55},
   Year = {2008},
   Month = {January},
   ISSN = {1545-5963},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18245874},
   Abstract = {When accounting for structural fluctuations or measurement
             errors, a single rigid structure may not be sufficient to
             represent a protein. One approach to solve this problem is
             to represent the possible conformations as a discrete set of
             observed conformations, an ensemble. In this work, we follow
             a different, richer approach, and introduce a framework for
             estimating probability density functions in very high
             dimensions, and then apply it to represent ensembles of
             folded proteins. This proposed approach combines techniques
             such as kernel density estimation, maximum likelihood,
             cross-validation, and bootstrapping. We present the
             underlying theoretical and computational framework and apply
             it to artificial data and protein ensembles obtained from
             molecular dynamics simulations. We compare the results with
             those obtained experimentally, illustrating the potential
             and advantages of this representation.},
   Doi = {10.1109/tcbb.2007.1061},
   Key = {fds265018}
}
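
%% Note: the ingredients listed above (kernel density estimation plus
%% cross-validation) can be sketched compactly: a Gaussian kernel density
%% estimate whose bandwidth is chosen by leave-one-out log-likelihood.
%% Dimensions and names are illustrative assumptions; real protein ensembles
%% live in the far higher dimensions the paper addresses.

import numpy as np

def loo_log_likelihood(samples, h):
    n, dim = samples.shape
    d2 = ((samples[:, None] - samples[None, :]) ** 2).sum(-1)
    k = np.exp(-d2 / (2 * h * h)) / ((2 * np.pi * h * h) ** (dim / 2))
    np.fill_diagonal(k, 0.0)             # leave each sample out of its own sum
    return np.log(k.sum(1) / (n - 1)).sum()

rng = np.random.default_rng(0)
x = rng.standard_normal((200, 2))        # stand-in for sampled conformations
bandwidths = np.linspace(0.1, 1.0, 10)
best = max(bandwidths, key=lambda h: loo_log_likelihood(x, h))
print("selected bandwidth:", best)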

@article{fds265022,
   Author = {Mairal, J and Elad, M and Sapiro, G},
   Title = {Sparse representation for color image restoration.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {17},
   Number = {1},
   Pages = {53-69},
   Year = {2008},
   Month = {January},
   ISSN = {1057-7149},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18229804},
   Abstract = {Sparse representations of signals have drawn considerable
             interest in recent years. The assumption that natural
             signals, such as images, admit a sparse decomposition over a
             redundant dictionary leads to efficient algorithms for
             handling such sources of data. In particular, the design of
             well adapted dictionaries for images has been a major
             challenge. The K-SVD has been recently proposed for this
             task and shown to perform very well for various grayscale
             image processing tasks. In this paper, we address the
             problem of learning dictionaries for color images and extend
             the previously proposed K-SVD-based grayscale image denoising
             algorithm. This work puts forward ways for handling
             nonhomogeneous noise and missing information, paving the way
             to state-of-the-art results in applications such as color
             image denoising, demosaicing, and inpainting, as
             demonstrated in this paper.},
   Doi = {10.1109/tip.2007.911828},
   Key = {fds265022}
}

@article{Mairal2008,
   Author = {Mairal, J and Sapiro, G and Elad, M},
   Title = {Learning multiscale sparse representations for image and
             video restoration},
   Journal = {Multiscale Modeling and Simulation},
   Volume = {7},
   Number = {1},
   Pages = {214-241},
   Publisher = {Society for Industrial & Applied Mathematics
             (SIAM)},
   Year = {2008},
   Month = {January},
   ISSN = {1540-3459},
   url = {http://dx.doi.org/10.1137/070697653},
   Abstract = {This paper presents a framework for learning multiscale
             sparse representations of color images and video with
             overcomplete dictionaries. A single-scale K-SVD algorithm
             was introduced in [M. Aharon, M. Elad, and A. M. Bruckstein,
             IEEE Trans. Signal Process., 54 (2006), pp. 4311-4322],
             formulating sparse dictionary learning for grayscale image
             representation as an optimization problem, efficiently
             solved via orthogonal matching pursuit (OMP) and singular
             value decomposition (SVD). Following this work, we propose a
             multiscale learned representation, obtained by using an
             efficient quadtree decomposition of the learned dictionary
             and overlapping image patches. The proposed framework
             provides an alternative to predefined dictionaries such as
             wavelets and is shown to lead to state-of-the-art results in
             a number of image and video enhancement and restoration
             applications. This paper describes the proposed framework
             and accompanies it by numerous examples demonstrating its
             strength. © 2008 Society for Industrial and applied
             Mathematics.},
   Doi = {10.1137/070697653},
   Key = {Mairal2008}
}
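
%% Note: the SVD-based dictionary update at the heart of K-SVD, as
%% summarized above, refreshes one atom at a time with a rank-1
%% approximation of the residual restricted to the signals that use it.
%% A hedged single-scale sketch in Python/NumPy; the initialization, sizes,
%% and one-atom-per-signal coding are arbitrary simplifications.

import numpy as np

def ksvd_update_atom(D, X, Y, j):
    users = np.nonzero(X[j])[0]          # signals that currently use atom j
    if users.size == 0:
        return
    # Residual without atom j's contribution, restricted to its users.
    E = Y[:, users] - D @ X[:, users] + np.outer(D[:, j], X[j, users])
    u, s, vt = np.linalg.svd(E, full_matrices=False)
    D[:, j] = u[:, 0]                    # refreshed unit-norm atom
    X[j, users] = s[0] * vt[0]           # refreshed coefficients

rng = np.random.default_rng(0)
Y = rng.standard_normal((64, 500))       # vectorized training patches
D = rng.standard_normal((64, 128)); D /= np.linalg.norm(D, axis=0)
X = np.zeros((128, 500)); X[rng.integers(0, 128, 500), np.arange(500)] = 1.0
before = np.linalg.norm(Y - D @ X)
for j in range(128):
    ksvd_update_atom(D, X, Y, j)
print(before, np.linalg.norm(Y - D @ X))  # the fit error decreases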

@article{fds264767,
   Author = {Bar, L and Sapiro, G and IEEE},
   Title = {Generalized Newton methods for energy formulations in image
             processing},
   Journal = {2008 IEEE International Conference on Image Processing,
             Proceedings},
   Pages = {813-816},
   Year = {2008},
   ISBN = {978-1-4244-1764-3},
   ISSN = {1522-4880},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000262505000204&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264767}
}

@article{fds264835,
   Author = {Mairal, J and Bach, FR and Ponce, J and Sapiro, G and Zisserman,
             A},
   Title = {Supervised Dictionary Learning.},
   Journal = {NIPS},
   Pages = {1033-1040},
   Publisher = {Curran Associates, Inc.},
   Editor = {Koller, D and Schuurmans, D and Bengio, Y and Bottou,
             L},
   Year = {2008},
   url = {http://papers.nips.cc/book/advances-in-neural-information-processing-systems-21-2008},
   Key = {fds264835}
}

@article{fds264852,
   Author = {Rother, D and Sapiro, G and Pande, V},
   Title = {Statistical Characterization of Protein Ensembles.},
   Journal = {IEEE ACM Trans. Comput. Biol. Bioinform.},
   Volume = {5},
   Pages = {42-55},
   Year = {2008},
   url = {http://dx.doi.org/10.1145/1343571.1343575},
   Doi = {10.1145/1343571.1343575},
   Key = {fds264852}
}

@article{fds265010,
   Author = {Rother, D and Patwardhan, KA and Sapiro, G},
   Title = {What can casual walkers tell us about a 3D
             scene?},
   Journal = {Proceedings of the IEEE International Conference on Computer
             Vision},
   Publisher = {IEEE},
   Year = {2007},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ICCV.2007.4409083},
   Abstract = {An approach for incremental learning of a 3D scene from a
             single static video camera is presented in this paper. In
             particular, we exploit the presence of casual people walking
             in the scene to infer relative depth, learn shadows, and
             segment the critical ground structure. Considering that this
             type of video data is so ubiquitous, this work provides an
             important step towards 3D scene analysis from single cameras
             in readily available ordinary videos and movies. On-line 3D
             scene learning, as presented here, is very important for
             applications such as scene analysis, foreground refinement,
             tracking, biometrics, automated camera collaboration,
             activity analysis, identification, and real-time
             computer-graphics applications. The main contributions of
             this work are then two-fold. First, we use the people in the
             scene to continuously learn and update the 3D scene
             parameters using an incremental robust (L1) error
             minimization. Second, models of shadows in the scene are
             learned using a statistical framework. A symbiotic
             relationship between the shadow model and the estimated
             scene geometry is exploited towards incremental mutual
             improvement. We illustrate the effectiveness of the proposed
             framework with applications in foreground refinement,
             automatic segmentation as well as relative depth mapping of
             the floor/ground, and estimation of 3D trajectories of
             people in the scene. ©2007 IEEE.},
   Doi = {10.1109/ICCV.2007.4409083},
   Key = {fds265010}
}

@article{fds265011,
   Author = {Bai, X and Sapiro, G},
   Title = {A geodesic framework for fast interactive image and video
             segmentation and matting},
   Journal = {Proceedings of the IEEE International Conference on Computer
             Vision},
   Publisher = {IEEE},
   Year = {2007},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ICCV.2007.4408931},
   Abstract = {An interactive framework for soft segmentation and matting
             of natural images and videos is presented in this paper. The
             proposed technique is based on the optimal, linear time,
             computation of weighted geodesic distances to the
             user-provided scribbles, from which the whole data is
             automatically segmented. The weights are based on spatial
             and/or temporal gradients, without explicit optical flow or
             any advanced and often computationally expensive feature
             detectors. These could be naturally added to the proposed
             framework as well if desired, in the form of weights in the
             geodesic distances. A localized refinement step follows this
             fast segmentation in order to accurately compute the
             corresponding matte function. Additional constraints in
             the distance definition permit efficient handling of
             occlusions such as people or objects crossing each other in
             a video sequence. The presentation of the framework is
             complemented with numerous and diverse examples, including
             extraction of moving foreground from dynamic background, and
             comparisons with the recent literature. ©2007
             IEEE.},
   Doi = {10.1109/ICCV.2007.4408931},
   Key = {fds265011}
}
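
%% Note: the core computation above is a weighted geodesic distance from the
%% user scribbles. A minimal Python sketch using Dijkstra on the 4-connected
%% pixel grid with gradient-based edge weights; the paper computes these
%% distances exactly in linear time, so Dijkstra is only a readable stand-in.

import heapq
import numpy as np

def geodesic_distance(image, seeds):
    h, w = image.shape
    dist = np.full((h, w), np.inf)
    heap = [(0.0, s) for s in seeds]
    heapq.heapify(heap)
    for _, s in heap:
        dist[s] = 0.0
    while heap:
        d, (i, j) = heapq.heappop(heap)
        if d > dist[i, j]:
            continue                      # stale queue entry
        for ni, nj in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
            if 0 <= ni < h and 0 <= nj < w:
                nd = d + abs(image[ni, nj] - image[i, j])  # gradient weight
                if nd < dist[ni, nj]:
                    dist[ni, nj] = nd
                    heapq.heappush(heap, (nd, (ni, nj)))
    return dist

img = np.zeros((50, 50)); img[:, 25:] = 1.0    # two flat regions
d_fg = geodesic_distance(img, [(25, 45)])      # scribble on the right region
d_bg = geodesic_distance(img, [(25, 5)])       # scribble on the left region
print((d_fg < d_bg).mean())                    # ~half the pixels label as "fg"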

@article{fds265012,
   Author = {Haro, G and Randall, G and Sapiro, G},
   Title = {Stratification learning: Detecting mixed density and
             dimensionality in high dimensional point
             clouds},
   Journal = {Advances in Neural Information Processing
             Systems},
   Pages = {553-560},
   Year = {2007},
   Month = {December},
   ISSN = {1049-5258},
   Abstract = {The study of point cloud data sampled from a stratification,
             a collection of manifolds with possible different
             dimensions, is pursued in this paper. We present a technique
             for simultaneously soft clustering and estimating the mixed
             dimensionality and density of such structures. The framework
             is based on a maximum likelihood estimation of a Poisson
             mixture model. The presentation of the approach is completed
             with artificial and real examples demonstrating the
             importance of extending manifold learning to stratification
             learning.},
   Key = {fds265012}
}

@article{fds265013,
   Author = {Bar, L and Rumpf, M and Berkels, B and Sapiro, G},
   Title = {A variational framework for simultaneous motion estimation
             and restoration of motion-blurred video},
   Journal = {Proceedings of the IEEE International Conference on Computer
             Vision},
   Publisher = {IEEE},
   Year = {2007},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ICCV.2007.4409009},
   Abstract = {The problem of motion estimation and restoration of objects
             in a blurred video sequence is addressed in this paper. Fast
             movement of the objects, together with the aperture time of
             the camera, result in a motion-blurred image. The direct
             velocity estimation from this blurred video is inaccurate.
             On the other hand, an accurate estimation of the velocity of
             the moving objects is critical for restoration of
             motion-blurred video. Therefore, restoration needs accurate
             motion estimation and vice versa, and a joint process is
             called for. To address this problem we derive a novel model
             of the blurring process and propose a Mumford-Shah type of
             variational framework, acting on consecutive frames, for
             joint object deblurring and velocity estimation. The
             proposed procedure distinguishes between the moving object
             and the background and is accurate also close to the
             boundary of the moving object. Experimental results both on
             simulated and real data show the importance of this joint
             estimation and its superior performance when compared to the
             independent estimation of motion and restoration. ©2007
             IEEE.},
   Doi = {10.1109/ICCV.2007.4409009},
   Key = {fds265013}
}

@article{fds265004,
   Author = {Bartesaghi, A and Sprechmann, P and Randall, G and Sapiro, G and Subramaniam, S},
   Title = {Classification, averaging and reconstruction of
             macromolecules in electron tomography},
   Journal = {2007 4th IEEE International Symposium on Biomedical Imaging:
             From Nano to Macro - Proceedings},
   Pages = {244-247},
   Publisher = {IEEE},
   Year = {2007},
   Month = {November},
   url = {http://dx.doi.org/10.1109/ISBI.2007.356834},
   Abstract = {Electron tomography provides opportunities to determine
             three-dimensional cellular architecture at resolutions high
             enough to identify individual macromolecules such as
             proteins. Image analysis of such data poses a challenging
             problem due to the extremely low signal-to-noise ratios that
             make individual volumes simply too noisy to allow reliable
             structural interpretation. This requires using averaging
             techniques to boost the signal-to-noise ratios, a common
             practice in electron microscopy single particle analysis
             where they have proven to be very powerful in elucidating
             high resolution structure. Although there are significant
             similarities in the way data is processed, several new
             problems arise in the tomography case that have to be
             properly dealt with. Such problems involve dealing with the
             missing wedge characteristic of limited angle tomography,
             the need for robust and efficient 3D alignment routines, and
             design of methods that account for diverse conformations
             through the use of classification. We present a framework
             for reconstruction via alignment, classification and
             averaging of volumes obtained from limited angle electron
             tomography, providing a powerful tool for high resolution
             structure determination and description of conformational
             variability in a biological context. © 2007
             IEEE.},
   Doi = {10.1109/ISBI.2007.356834},
   Key = {fds265004}
}

@article{fds265007,
   Author = {Aganj, I and Bartesaghi, A and Borgnia, M and Liao, HY and Sapiro, G and Subramaniam, S},
   Title = {Regularization for inverting the radon transform with wedge
             consideration},
   Journal = {2007 4th IEEE International Symposium on Biomedical Imaging:
             From Nano to Macro - Proceedings},
   Pages = {217-220},
   Publisher = {IEEE},
   Year = {2007},
   Month = {November},
   url = {http://dx.doi.org/10.1109/ISBI.2007.356827},
   Abstract = {In limited angle tomography, with applications such as
             electron microscopy, medical imaging, and industrial
             testing, the object of interest is scanned over a limited
             angular range, which is less than the full 180°
             mathematically required for density reconstruction. The use
             of standard full-range reconstruction algorithms produces
             results with notorious "butterfly" or "wedge" artifacts. In
             this work we propose a reconstruction technique with a
             regularization term that takes into account the orientation
             of the missing angular range, also denoted as missing wedge.
             We show that a regularization that penalizes non-uniformly
             in the orientation space produces reconstructions with less
             artifacts, thereby improving the recovery of the "invisible"
             edges due to the missing wedge. We present the underlying
             framework and results for a challenging phantom and real
             cryo-electron microscopy data. © 2007 IEEE.},
   Doi = {10.1109/ISBI.2007.356827},
   Key = {fds265007}
}

@article{fds265008,
   Author = {Narasimha, R and Aganj, I and Borgnia, M and Sapiro, G and McLaughlin,
             S and Milne, J and Subramaniam, S},
   Title = {From gigabytes to bytes: Automated denoising and feature
             identification in electron tomograms of intact bacterial
             cells},
   Journal = {2007 4th IEEE International Symposium on Biomedical Imaging:
             From Nano to Macro - Proceedings},
   Pages = {304-307},
   Publisher = {IEEE},
   Year = {2007},
   Month = {November},
   url = {http://dx.doi.org/10.1109/ISBI.2007.356849},
   Abstract = {Advances in automated data acquisition in electron
             tomography have led to an explosion in the amount of data
             that can be obtained about the spatial architecture of a
             variety of biologically and medically relevant objects with
             resolutions in the "nano" range of 10-1000 nm. The
             development of methods to automatically analyze the vast
             amounts of information contained in these tomograms is a
             major challenge since the electron tomograms are
             intrinsically very noisy. A fundamental step in the
             automatic analysis of large amounts of data for statistical
             inference is to segment relevant 3D features in cellular
             tomograms. Procedures for segmentation must work robustly
             and rapidly in spite of the low signal to noise ratios
             inherent to biological electron microscopy. This work first
             evaluates various non-linear denoising techniques on
             tomograms recorded at cryogenic temperatures. Using datasets
             of bacterial tomograms as an example, we demonstrate that
             non-linear diffusion techniques significantly improve the
             fidelity of automated feature extraction. Our approach
             represents an important step in automating the efficient
             extraction of useful information from large datasets in
             biological tomography, and facilitates the overall goal of
             speeding up the process of reducing gigabyte-sized tomograms
             to relevant byte-sized data. © 2007 IEEE.},
   Doi = {10.1109/ISBI.2007.356849},
   Key = {fds265008}
}
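
%% Note: a classical instance of the nonlinear diffusion techniques
%% evaluated above is Perona-Malik smoothing, sketched below in Python/NumPy.
%% Parameters are illustrative assumptions, not those used for the tomograms.

import numpy as np

def perona_malik(u, n_iter=50, kappa=0.5, dt=0.2):
    u = u.astype(float).copy()
    for _ in range(n_iter):
        # One-sided differences to the four neighbors.
        dn = np.roll(u, -1, 0) - u
        ds = np.roll(u, 1, 0) - u
        de = np.roll(u, -1, 1) - u
        dw = np.roll(u, 1, 1) - u
        # Conductance shrinks near large gradients, so edges survive.
        g = lambda d: np.exp(-(d / kappa) ** 2)
        u += dt * (g(dn) * dn + g(ds) * ds + g(de) * de + g(dw) * dw)
    return u

rng = np.random.default_rng(0)
clean = np.zeros((64, 64)); clean[:, 32:] = 1.0
noisy = clean + 0.3 * rng.standard_normal(clean.shape)
print(np.abs(perona_malik(noisy) - clean).mean())  # noise drops, edge survives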

@article{fds265005,
   Author = {Duarte-Carvajalino, JM and Sapiro, G and Vélez-Reyes, M and Castillo, P},
   Title = {Fast multi-scale regularization and segmentation of
             hyperspectral imagery via anisotropic diffusion and
             Algebraic Multigrid solvers},
   Journal = {Proceedings of SPIE - The International Society for Optical
             Engineering},
   Volume = {6565},
   Publisher = {SPIE},
   Year = {2007},
   Month = {November},
   ISSN = {0277-786X},
   url = {http://dx.doi.org/10.1117/12.721036},
   Abstract = {This paper presents an algorithm that generates a
             scale-space representation of hyperspectral imagery using
             Algebraic Multigrid (AMG) solvers. The scale-space
             representation is obtained by solving with AMG a
             vector-valued anisotropic diffusion equation, with the
             hyperspectral image as its initial condition. AMG also
             provides the necessary structure to obtain a hierarchical
             segmentation of the image. The scale-space representation of
             the hyperspectral image can be segmented in linear time
             complexity. Results in the paper show that improved
             segmentation is achieved. The proposed methodology to solve
             vector PDEs can be used to extend a number of techniques
             currently being developed for the fast computation of
             geometric PDEs and its application for the processing of
             hyperspectral and multispectral imagery.},
   Doi = {10.1117/12.721036},
   Key = {fds265005}
}

@article{fds265002,
   Author = {Haro, G and Randall, G and Sapiro, G},
   Title = {Regularized mixed dimensionality and density learning in
             computer vision},
   Journal = {Proceedings of the IEEE Computer Society Conference on
             Computer Vision and Pattern Recognition},
   Publisher = {IEEE},
   Year = {2007},
   Month = {October},
   ISSN = {1063-6919},
   url = {http://dx.doi.org/10.1109/CVPR.2007.383401},
   Abstract = {A framework for the regularized estimation of non-uniform
             dimensionality and density in high dimensional data is
             introduced in this work. This leads to learning
             stratifications, that is, mixture of manifolds representing
             different characteristics and complexities in the data set.
             The basic idea relies on modeling the high dimensional
             sample points as a process of Poisson mixtures, with
             regularizing restrictions and spatial continuity
             constraints. Theoretical asymptotic results for the model
             are presented as well. The presentation of the framework is
             complemented with artificial and real examples showing the
             importance of regularized stratification learning in
             computer vision applications. © 2007 IEEE.},
   Doi = {10.1109/CVPR.2007.383401},
   Key = {fds265002}
}

@article{fds265006,
   Author = {Arias, P and Randall, G and Sapiro, G},
   Title = {Connecting the out-of-sample and pre-image problems in
             Kernel methods},
   Journal = {Proceedings of the IEEE Computer Society Conference on
             Computer Vision and Pattern Recognition},
   Publisher = {IEEE},
   Year = {2007},
   Month = {October},
   ISSN = {1063-6919},
   url = {http://dx.doi.org/10.1109/CVPR.2007.383038},
   Abstract = {Kernel methods have been widely studied in the field of
             pattern recognition. These methods implicitly map the data,
             via the "kernel trick," into a space which is more
             appropriate for analysis. Many manifold learning and
             dimensionality reduction techniques are simply kernel
             methods for which the mapping is explicitly computed. In
             such cases, two problems related with the mapping arise: The
             out-of-sample extension and the pre-image computation. In
             this paper we propose a new pre-image method based on the
             Nyström formulation for the out-of-sample extension,
             showing the connections between both problems. We also
             address the importance of normalization in the feature
             space, which has been ignored by standard pre-image
             algorithms. As an example, we apply these ideas to the
             Gaussian kernel, and relate our approach to other popular
             pre-image methods. Finally, we show the application of these
             techniques in the study of dynamic shapes. © 2007
             IEEE.},
   Doi = {10.1109/CVPR.2007.383038},
   Key = {fds265006}
}
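
%% Note: the Nystrom out-of-sample extension discussed above embeds a new
%% point from its kernel evaluations against the training set and the
%% precomputed eigendecomposition. A toy Python/NumPy sketch with a Gaussian
%% kernel; the feature-space normalization the paper emphasizes is omitted.

import numpy as np

def gaussian_kernel(a, b, sigma=1.0):
    d2 = ((a[:, None] - b[None, :]) ** 2).sum(-1)
    return np.exp(-d2 / (2 * sigma ** 2))

rng = np.random.default_rng(0)
x_train = rng.standard_normal((100, 3))
k = gaussian_kernel(x_train, x_train)
vals, vecs = np.linalg.eigh(k)                     # ascending eigenvalues
vals, vecs = vals[::-1][:2], vecs[:, ::-1][:, :2]  # keep the top-2 pairs
embed_train = vecs * np.sqrt(len(x_train))         # one common convention

def embed_new(x_new):
    # Nystrom formula: project kernel evaluations onto the eigenvectors.
    k_new = gaussian_kernel(x_new, x_train)
    return np.sqrt(len(x_train)) * (k_new @ vecs) / vals

print(embed_new(x_train[:1]))  # matches embed_train[0] for a training point
print(embed_train[0])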

@article{fds265019,
   Author = {Moenning, C and Mémoli, F and Sapiro, G and Dyn, N and Dodgson,
             NA},
   Title = {Meshless geometric subdivision},
   Journal = {Graphical Models},
   Volume = {69},
   Number = {3-4},
   Pages = {160-179},
   Publisher = {Elsevier BV},
   Year = {2007},
   Month = {May},
   ISSN = {1524-0703},
   url = {http://dx.doi.org/10.1016/j.gmod.2006.11.001},
   Abstract = {Point-based surface processing has developed into an
             attractive alternative to mesh-based processing tools for a
             number of geometric modeling applications. By working with
             point clouds directly, processing is based on the raw data
             and its underlying geometry rather than any arbitrary
             intermediate representations and generally artificial
             connectivity relations. We extend this principle into the
             area of subdivision surfaces by introducing the notion of
             meshless geometric subdivision. Our approach replaces the
             role of mesh connectivity with intrinsic point proximity
             thereby avoiding a number of limitations of mesh-based
             surface subdivision schemes. Apart from introducing this
             idea of meshless subdivision, we put forward a first
             intrinsic meshless subdivision scheme and present a new
             method for the computation of intrinsic means on Euclidean
             manifolds. © 2006 Elsevier Inc. All rights
             reserved.},
   Doi = {10.1016/j.gmod.2006.11.001},
   Key = {fds265019}
}

@article{fds265014,
   Author = {Kao, C-Y and Hofer, M and Sapiro, G and Stern, J and Rehm, K and Rottenberg,
             DA},
   Title = {A geometric method for automatic extraction of sulcal
             fundi.},
   Journal = {IEEE transactions on medical imaging},
   Volume = {26},
   Number = {4},
   Pages = {530-540},
   Year = {2007},
   Month = {April},
   ISSN = {0278-0062},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/17427740},
   Abstract = {Sulcal fundi are 3-D curves that lie in the depths of the
             cerebral cortex and, in addition to their intrinsic value in
             brain research, are often used as landmarks for downstream
             computations in brain imaging. In this paper, we present a
             geometric algorithm that automatically extracts the sulcal
             fundi from magnetic resonance images and represents them as
             spline curves lying on the extracted triangular mesh
             representing the cortical surface. The input to our
             algorithm is a triangular mesh representation of an
             extracted cortical surface as computed by one of several
             available software packages for performing automated and
             semi-automated cortical surface extraction. Given this input
             we first compute a geometric depth measure for each triangle
             on the cortical surface mesh, and based on this information
             we extract sulcal regions by checking for connected regions
             exceeding a depth threshold. We then identify endpoints of
             each region and delineate the fundus by thinning the
             connected region while keeping the endpoints fixed. The
             curves thus defined are regularized using weighted splines
             on the surface mesh to yield high-quality representations of
             the sulcal fundi. We present the geometric framework and
             validate it with real data from human brains. Comparisons
             with expert-labeled sulcal fundi are part of this validation
             process.},
   Doi = {10.1109/tmi.2006.886810},
   Key = {fds265014}
}

@article{fds265016,
   Author = {Protiere, A and Sapiro, G},
   Title = {Interactive image segmentation via adaptive weighted
             distances.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {16},
   Number = {4},
   Pages = {1046-1057},
   Year = {2007},
   Month = {April},
   ISSN = {1057-7149},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/17405436},
   Abstract = {An interactive algorithm for soft segmentation of natural
             images is presented in this paper. The user first roughly
             scribbles different regions of interest, and from them, the
             whole image is automatically segmented. This soft
             segmentation is obtained via fast, linear complexity
             computation of weighted distances to the user-provided
             scribbles. The adaptive weights are obtained from a series
             of Gabor filters, and are automatically computed according
             to the ability of each single filter to discriminate between
             the selected regions of interest. We present the underlying
             framework and examples showing the capability of the
             algorithm to segment diverse images.},
   Doi = {10.1109/tip.2007.891796},
   Key = {fds265016}
}

@article{fds265017,
   Author = {Mohan, A and Sapiro, G and Bosch, E},
   Title = {Spatially coherent nonlinear dimensionality reduction and
             segmentation of hyperspectral images},
   Journal = {IEEE Geoscience and Remote Sensing Letters},
   Volume = {4},
   Number = {2},
   Pages = {206-210},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2007},
   Month = {April},
   ISSN = {1545-598X},
   url = {http://dx.doi.org/10.1109/LGRS.2006.888105},
   Abstract = {The nonlinear dimensionality reduction and its effects on
             vector classification and segmentation of hyperspectral
             images are investigated in this letter. In particular, the
             way dimensionality reduction influences and helps
             classification and segmentation is studied. The proposed
             framework takes into account the nonlinear nature of
             high-dimensional hyperspectral images and projects onto a
             lower dimensional space via a novel spatially coherent
             locally linear embedding technique. The spatial coherence is
             introduced by comparing pixels based on their local
             surrounding structure in the image domain and not just on
             their individual values as classically done. This spatial
             coherence in the image domain across the multiple bands
             defines the high-dimensional local neighborhoods used for
             the dimensionality reduction. This spatial coherence concept
             is also extended to the segmentation and classification
             stages that follow the dimensionality reduction, introducing
             a modified vector angle distance. We present the underlying
             concepts of the proposed framework and experimental results
             showing the significant classification improvements. © 2007
             IEEE.},
   Doi = {10.1109/LGRS.2006.888105},
   Key = {fds265017}
}

@article{fds265015,
   Author = {Patwardhan, KA and Sapiro, G and Bertalmío, M},
   Title = {Video inpainting under constrained camera
             motion.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {16},
   Number = {2},
   Pages = {545-553},
   Year = {2007},
   Month = {February},
   ISSN = {1057-7149},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/17269646},
   Abstract = {A framework for inpainting missing parts of a video sequence
             recorded with a moving or stationary camera is presented in
             this work. The region to be inpainted is general: it may be
             still or moving, in the background or in the foreground, it
             may occlude one object and be occluded by some other object.
             The algorithm consists of a simple preprocessing stage and
             two steps of video inpainting. In the preprocessing stage,
             we roughly segment each frame into foreground and
             background. We use this segmentation to build three image
             mosaics that help to produce time-consistent results and
             also improve the performance of the algorithm by reducing
             the search space. In the first video inpainting step, we
             reconstruct moving objects in the foreground that are
             "occluded" by the region to be inpainted. To this end, we
             fill the gap as much as possible by copying information from
             the moving foreground in other frames, using a
             priority-based scheme. In the second step, we inpaint the
             remaining hole with the background. To accomplish this, we
             first align the frames and directly copy when possible. The
             remaining pixels are filled in by extending spatial texture
             synthesis techniques to the spatiotemporal domain. The
             proposed framework has several advantages over
             state-of-the-art algorithms that deal with similar types of
             data and constraints. It permits some camera motion, is
             simple to implement, fast, does not require statistical
             models of background or foreground, works well in the
             presence of rich and cluttered backgrounds, and the results
             show that there is no visible blurring or motion artifacts.
             A number of real examples taken with a consumer hand-held
             camera are shown supporting these findings.},
   Doi = {10.1109/tip.2006.888343},
   Key = {fds265015}
}

@article{fds264784,
   Author = {Bartesaghi, A and Sprechmann, P and Randall, G and Sapiro, G and Subramaniam, S},
   Title = {A framework for classification and averaging of 3D
             tomographic volumes},
   Journal = {Biophysical Journal},
   Pages = {509A-509A},
   Publisher = {Biophysical Society},
   Year = {2007},
   Month = {January},
   ISSN = {0006-3495},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000243972403228&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264784}
}

@article{fds264997,
   Author = {Bai, X and Sapiro, G},
   Title = {Distancecut: Interactive segmentation and matting of images
             and videos},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Volume = {2},
   Pages = {249-252},
   Publisher = {IEEE},
   Year = {2007},
   Month = {January},
   ISSN = {1522-4880},
   url = {http://dx.doi.org/10.1109/ICIP.2007.4379139},
   Abstract = {An interactive algorithm for soft segmentation and matting
             of natural images and videos is presented in this paper. The
             technique follows and extends [10], where the user first
             roughly scribbles/labels different regions of interest, and
             from them the whole data is automatically segmented. The
             segmentation and alpha matte are obtained from the fast,
             linear complexity, computation of weighted distances to the
             user-provided scribbles. These weighted distances assign
             probabilities to each labeled class for every pixel. The
             weights are derived from models of the image regions
             obtained from the user provided scribbles via kernel density
             estimation. The matting results follow from combining this
             density and the computed weighted distances. We present the
             underlying framework and examples showing the capability of
             the algorithm to segment and compute alpha mattes, in
             interactive real time, for difficult natural data. © 2007
             IEEE.},
   Doi = {10.1109/ICIP.2007.4379139},
   Key = {fds264997}
}
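
The core computation sketched in this abstract is a weighted distance from each scribble set, with edge weights derived from a density model of the scribbled pixels. A rough Python stand-in follows; the paper computes these distances with a fast linear-complexity transform, whereas this sketch uses plain Dijkstra, and the names and the 1/(density) edge cost are illustrative assumptions.

    import heapq
    import numpy as np

    def weighted_distance(image, seeds, density):
        """Dijkstra stand-in for the scribble-to-pixel weighted distance.
        image: HxW float array; seeds: list of (y, x) scribble pixels;
        density: callable mapping a gray value to the estimated likelihood
        of the scribbled class (e.g., from a kernel density estimate).
        Edge costs are small where the class is likely, so distances stay
        low inside the region the scribble describes."""
        h, w = image.shape
        dist = np.full((h, w), np.inf)
        heap = []
        for y, x in seeds:
            dist[y, x] = 0.0
            heap.append((0.0, y, x))
        heapq.heapify(heap)
        while heap:
            d, y, x = heapq.heappop(heap)
            if d > dist[y, x]:
                continue
            for dy, dx in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                ny, nx = y + dy, x + dx
                if 0 <= ny < h and 0 <= nx < w:
                    nd = d + 1.0 / (density(image[ny, nx]) + 1e-8)
                    if nd < dist[ny, nx]:
                        dist[ny, nx] = nd
                        heapq.heappush(heap, (nd, ny, nx))
        return dist

Normalizing the per-label distances into probabilities then yields the soft segmentation and, combined with the densities, the alpha matte.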

@article{fds264998,
   Author = {Patwardhan, KA and Sapiro, G and Morellas, V},
   Title = {A graph-based foreground representation and its application
             in example based people matching in video},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Volume = {5},
   Pages = {V37-V40},
   Publisher = {IEEE},
   Year = {2007},
   Month = {January},
   ISSN = {1522-4880},
   url = {http://dx.doi.org/10.1109/ICIP.2007.4379759},
   Abstract = {In this work, we propose a framework for foreground
             representation in video and illustrate it with a
             multi-camera people matching application. We first decompose
             the video into foreground and background. A low-level
             coarse segmentation of the foreground is then used to
             generate a simple graph representation. A vertex in the
             graph represents the "appearance" of a corresponding segment
             in the foreground, while the relationship between two
             segments is encoded by an edge between the corresponding
             vertices. This provides a simple yet powerful and general
             representation of the foreground, which can be very useful
             in problems such as people detection and tracking. We
             illustrate the effectiveness of this model using an "example
             based query" type of application for people matching in
             videos. Matching results are provided in multiple-camera
             situations and also under occlusion. © 2007
             IEEE.},
   Doi = {10.1109/ICIP.2007.4379759},
   Key = {fds264998}
}
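
For concreteness, a sketch of the representation itself: vertices carry a per-segment appearance (here just the mean color, an assumption for illustration) and edges record which segments touch. Names are illustrative, not from the paper.

    import numpy as np

    def foreground_graph(labels, image):
        """Build the segment graph from a coarse foreground segmentation.
        labels: HxW int array of segment ids, -1 for background;
        image: HxWx3 float array. Returns (vertices, edges), where each
        vertex id maps to its mean color and edges join 4-adjacent
        segments."""
        vertices = {int(s): image[labels == s].mean(axis=0)
                    for s in np.unique(labels) if s >= 0}
        edges = set()
        for a, b in ((labels[:, :-1], labels[:, 1:]),
                     (labels[:-1, :], labels[1:, :])):
            for u, v in zip(a.ravel(), b.ravel()):
                if u >= 0 and v >= 0 and u != v:
                    edges.add((int(min(u, v)), int(max(u, v))))
        return vertices, edges

Matching two people then reduces to comparing such attributed graphs, which is what the example-based queries in the paper exploit.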

@article{fds265009,
   Author = {Sundaramoorthi, G and Yezzi, A and Mennucci, AC and Sapiro,
             G},
   Title = {New possibilities with Sobolev active contours},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {4485 LNCS},
   Pages = {153-164},
   Year = {2007},
   Month = {January},
   ISSN = {0302-9743},
   url = {http://dx.doi.org/10.1007/978-3-540-72823-8_14},
   Abstract = {Recently, the Sobolev metric was introduced to define
             gradient flows of various geometric active contour energies.
             It was shown that the Sobolev metric outperforms the
             traditional metric for the same energy in many cases such as
             for tracking where the coarse scale changes of the contour
             are important. Some interesting properties of Sobolev
             gradient flows are that they stabilize certain unstable
             traditional flows, and the order of the evolution PDEs is
             reduced when compared with traditional gradient flows of the
             same energies. In this paper, we explore new possibilities
             for active contours made possible by Sobolev active
             contours. The Sobolev method allows one to implement new
             energy-based active contour models that were not otherwise
             considered because the traditional minimizing method cannot
             be used. In particular, we exploit the stabilizing and the
             order reducing properties of Sobolev gradients to implement
             the gradient descent of these new energies. We give examples
             of this class of energies, which include some simple
             geometric priors and new edge-based energies. We will show
             that these energies can be quite useful for segmentation and
             tracking. We will show that the gradient flows using the
             traditional metric are either ill-posed or numerically
             difficult to implement, and then show that the flows can be
             implemented in a stable and numerically feasible manner
             using the Sobolev gradient. © Springer-Verlag Berlin
             Heidelberg 2007.},
   Doi = {10.1007/978-3-540-72823-8_14},
   Key = {fds265009}
}
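
The stabilizing and order-reducing behavior the abstract exploits is visible in the standard form of the Sobolev gradient; a sketch in illustrative notation (s is arc length normalized to [0, 1], lambda > 0 a scale parameter — not the paper's own symbols):

    % H^1 gradient as a smoothed L^2 gradient (standard form from the
    % Sobolev active contours literature):
    \[
      \nabla_{H^1} E \;=\; \bigl(\mathrm{Id} - \lambda\,\partial_s^{2}\bigr)^{-1}\,\nabla_{L^2} E .
    \]
    % Inverting (Id - lambda d^2/ds^2) damps high-frequency components of
    % the usual gradient, which stabilizes otherwise ill-posed flows and
    % lowers the order of the resulting evolution PDE.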

@article{fds264758,
   Author = {Mairal, J and Sapiro, G and Elad, M},
   Title = {Multiscale sparse image representation with learned
             dictionaries},
   Journal = {2007 IEEE INTERNATIONAL CONFERENCE ON IMAGE PROCESSING, VOLS
             1-7},
   Pages = {1233-+},
   Year = {2007},
   ISBN = {978-1-4244-1436-9},
   ISSN = {1522-4880},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000253487201068&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264758}
}

@article{fds264776,
   Author = {Sapiro, G},
   Title = {I never learned to write - The paths of creation},
   Journal = {ACTES DE LA RECHERCHE EN SCIENCES SOCIALES},
   Number = {168},
   Pages = {12-+},
   Year = {2007},
   ISSN = {0335-5322},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000246990400002&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264776}
}

@article{fds264780,
   Author = {Bai, X and Sapiro, G},
   Title = {Distancecut: Interactive segmentation and matting of images
             and videos},
   Journal = {2007 IEEE INTERNATIONAL CONFERENCE ON IMAGE PROCESSING, VOLS
             1-7},
   Pages = {813-816},
   Year = {2007},
   ISBN = {978-1-4244-1436-9},
   ISSN = {1522-4880},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000253487200204&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264780}
}

@article{fds264806,
   Author = {Sapiro, G},
   Title = {The artistic vocation between donation and
             self-donation},
   Journal = {ACTES DE LA RECHERCHE EN SCIENCES SOCIALES},
   Number = {168},
   Pages = {4-11},
   Year = {2007},
   ISSN = {0335-5322},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000246990400001&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264806}
}

@article{fds264824,
   Author = {Patwardhan, KA and Sapiro, G and Morellas, V},
   Title = {A graph-based foreground representation and its application
             in example based people matching in video},
   Journal = {2007 IEEE INTERNATIONAL CONFERENCE ON IMAGE PROCESSING, VOLS
             1-7},
   Pages = {2289-+},
   Year = {2007},
   ISBN = {978-1-4244-1436-9},
   ISSN = {1522-4880},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000253487201332&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264824}
}

@article{fds264825,
   Author = {Sapiro, G},
   Title = {The ideology of the donation},
   Journal = {ACTES DE LA RECHERCHE EN SCIENCES SOCIALES},
   Number = {168},
   Pages = {31-33},
   Year = {2007},
   ISSN = {0335-5322},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000246990400003&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264825}
}

@article{fds264847,
   Author = {Rother, D and Patwardhan, KA and Sapiro, G},
   Title = {What Can Casual Walkers Tell Us About A 3D
             Scene?},
   Journal = {2007 IEEE 11th International Conference on Computer
             Vision},
   Pages = {1-8},
   Publisher = {IEEE},
   Year = {2007},
   ISBN = {9781424416301},
   url = {http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=4408818},
   Doi = {10.1109/iccv.2007.4409082},
   Key = {fds264847}
}

@article{fds264724,
   Author = {Bertalmío, M and Caselles, V and Haro, G and Sapiro,
             G},
   Title = {PDE-based image and surface inpainting},
   Pages = {33-61},
   Publisher = {Springer Verlag},
   Year = {2006},
   Month = {December},
   url = {http://dx.doi.org/10.1007/0-387-28831-7_3},
   Abstract = {Inpainting, the technique of modifying an image in an
             undetectable form, is as ancient as art itself. The goals
             and applications of inpainting are numerous, from the
             restoration of damaged paintings, photographs and films, to
             the removal of selected undesirable objects. This chapter is
             intended to present an overview of PDE based image
             inpainting algorithms, with emphasis in models developed by
             the authors. These models are based on the propagation of
             information along the image isophotes and on the
             minimization of an energy functional which follows a
             relaxation of the Elastica model. This last variational
             formulation can be easily extended to 3D to fill holes in
             surfaces, a problem closely related to image inpainting.
             Basic PDE-based approaches to inpainting share the
             shortcoming that they cannot restore texture, so
             combinations of these algorithms with texture synthesis
             techniques are also discussed. Some results are shown for
             applications such as removal of text, restoration of
             scratched photographs, removal of selected objects and
             reconstruction of 3D surfaces with holes. Other recent
             approaches to the image inpainting problem are also briefly
             reviewed. © 2006 Springer Science+Business Media,
             Inc.},
   Doi = {10.1007/0-387-28831-7_3},
   Key = {fds264724}
}

@article{fds264854,
   Author = {Hofer, M and Sapiro, G and Wallner, J},
   Title = {Fair polyline networks for constrained smoothing of digital
             terrain elevation data},
   Journal = {IEEE Transactions on Geoscience and Remote
             Sensing},
   Volume = {44},
   Number = {10},
   Pages = {2983-2990},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2006},
   Month = {December},
   url = {http://dx.doi.org/10.1109/TGRS.2006.875451},
   Abstract = {In this paper, a framework for smoothing gridlike digital
             terrain elevation data, which achieves a fair shape by means
             of minimizing an energy functional, is presented. The
             minimization is performed under the side condition of hard
             constraints, which comes from available horizontal and
             vertical accuracy bounds in the standard elevation
             specification. In this paper, the framework is introduced,
             and the suitability of this method for the tasks of
             accuracy-constrained smoothing, feature-preserving
             smoothing, and filling of data voids is demonstrated. ©
             2006 IEEE.},
   Doi = {10.1109/TGRS.2006.875451},
   Key = {fds264854}
}

@article{fds264995,
   Author = {Mairal, J and Sapiro, G and Elad, M},
   Title = {Multiscale sparse image representation with learned
             dictionaries},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Volume = {3},
   Pages = {III105-III108},
   Publisher = {IEEE},
   Year = {2006},
   Month = {December},
   ISSN = {1522-4880},
   url = {http://dx.doi.org/10.1109/ICIP.2007.4379257},
   Abstract = {This paper introduces a new framework for learning
             multiscale sparse representations of natural images with
             overcomplete dictionaries. Our work extends the K-SVD
             algorithm [1], which learns sparse single-scale dictionaries
             for natural images. Recent work has shown that the K-SVD can
             lead to state-of-the-art image restoration results [2, 3].
             We show that these are further improved with a multiscale
             approach, based on a Quadtree decomposition. Our framework
             provides an alternative to multiscale pre-defined
             dictionaries such as wavelets, curvelets, and contourlets,
             with dictionaries optimized for the data and application
             instead of pre-modelled ones. © 2007 IEEE.},
   Doi = {10.1109/ICIP.2007.4379257},
   Key = {fds264995}
}
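
A minimal sketch of the multiscale idea, assuming scikit-learn's dictionary learner as a stand-in for K-SVD: learn one dictionary per patch scale (one quadtree level). The function name, patch sizes, and atom count are illustrative choices, not values from the paper.

    import numpy as np
    from sklearn.decomposition import DictionaryLearning
    from sklearn.feature_extraction.image import extract_patches_2d

    def multiscale_dictionaries(image, patch_sizes=(8, 16), n_atoms=64):
        """Learn one dictionary per patch scale. image: 2D gray-level
        float array. Returns {patch_size: atoms}, with atoms of shape
        (n_atoms, patch_size**2)."""
        dictionaries = {}
        for p in patch_sizes:
            patches = extract_patches_2d(image, (p, p),
                                         max_patches=2000, random_state=0)
            X = patches.reshape(len(patches), -1).astype(float)
            X -= X.mean(axis=1, keepdims=True)   # remove per-patch DC
            learner = DictionaryLearning(n_components=n_atoms,
                                         random_state=0)
            dictionaries[p] = learner.fit(X).components_
        return dictionaries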

@article{fds264996,
   Author = {Kao, CY and Hofer, M and Sapiro, G and Stern, J and Rottenberg,
             DA},
   Title = {A geometric method for automatic extraction of sulcal
             fundi},
   Journal = {2006 3rd IEEE International Symposium on Biomedical Imaging:
             From Nano to Macro - Proceedings},
   Volume = {2006},
   Pages = {1168-1171},
   Year = {2006},
   Month = {November},
   Abstract = {Sulcal fundi are 3D curves that lie in the depths of the
             cerebral cortex and are often used as landmarks for
             downstream computations in brain imaging. We present a
             sequence of geometric algorithms which automatically extract
             the sulcal fundi from magnetic resonance images and
             represent them as smooth polygons lying on the cortical
             surface. First we compute a geometric depth measure for each
             point on the cortical surface, and based on this information
             we extract sulcal regions by checking the connectivity above
             a depth threshold. We then extract the endpoints of each
             fundus and delineate the fundus by thinning each connected
             region keeping the endpoints fixed. The curves thus defined
             are smoothed using weighted splines on the gray-matter
             surface to yield high-quality representations of the sulcal
             fundi. © 2006 IEEE.},
   Key = {fds264996}
}

@article{fds264761,
   Author = {Sapiro, G},
   Title = {JANE F. FULCHER. The Composer as Intellectual: Music and
             Ideology in France 1914-1940. New York: Oxford University
             Press. 2005. Pp. xiv, 473. $74.00},
   Journal = {The American Historical Review},
   Volume = {111},
   Number = {4},
   Pages = {1261-1262},
   Publisher = {Oxford University Press (OUP)},
   Year = {2006},
   Month = {October},
   ISSN = {0002-8762},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000241473300156&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.1086/ahr.111.4.1261},
   Key = {fds264761}
}

@article{fds265003,
   Author = {Greer, JB and Bertozzi, AL and Sapiro, G},
   Title = {Fourth order partial differential equations on general
             geometries},
   Journal = {Journal of Computational Physics},
   Volume = {216},
   Number = {1},
   Pages = {216-246},
   Publisher = {Elsevier BV},
   Year = {2006},
   Month = {July},
   ISSN = {0021-9991},
   url = {http://dx.doi.org/10.1016/j.jcp.2005.11.031},
   Abstract = {We extend a recently introduced method for numerically
             solving partial differential equations on implicit surfaces
             [M. Bertalmío, L.T. Cheng, S. Osher, G. Sapiro. Variational
             problems and partial differential equations on implicit
             surfaces, J. Comput. Phys. 174 (2) (2001) 759-780] to fourth
             order PDEs including the Cahn-Hilliard equation and a
             lubrication model for curved surfaces. By representing a
             surface in ℝ^N as the level set of a smooth function
             φ, we compute the PDE using only finite differences
             on a standard Cartesian mesh in ℝ^N. The higher order
             equations introduce a number of challenges that are of less
             concern when applying this method to first and second order
             PDEs. Many of these problems, such as time-stepping
             restrictions and large stencil sizes, are shared by standard
             fourth order equations in Euclidean domains, but others are
             caused by the extreme degeneracy of the PDEs that result
             from this method and the general geometry. We approach these
             difficulties by applying convexity splitting methods, ADI
             schemes, and iterative solvers. We discuss in detail the
             differences between computing these fourth order equations
             and computing the first and second order PDEs considered in
             earlier work. We explicitly derive schemes for the linear
             fourth order diffusion, the Cahn-Hilliard equation for phase
             transition in a binary alloy, and surface tension driven
             flows on complex geometries. Numerical examples validating
             our methods are presented for these flows for data on
             general surfaces. © 2005 Elsevier Inc. All rights
             reserved.},
   Doi = {10.1016/j.jcp.2005.11.031},
   Key = {fds265003}
}
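
The machinery being extended writes intrinsic derivatives through projections onto the surface; the basic operators, in standard level-set form (illustrative notation), are:

    % phi is the embedding function, n = grad(phi)/|grad(phi)| the unit
    % normal, P_n = I - n n^T the tangent-plane projector. For data u:
    \[
      \nabla_{S} u = P_{n}\,\nabla u ,
      \qquad
      \Delta_{S} u = \frac{1}{|\nabla\phi|}\,
        \nabla\!\cdot\!\bigl(|\nabla\phi|\,P_{n}\,\nabla u\bigr) .
    \]
    % Fourth order operators such as the intrinsic biharmonic are then
    % compositions, e.g. Delta_S(Delta_S u), computed with ordinary
    % finite differences on the Cartesian grid.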

@article{fds265001,
   Author = {Lee, M and Lloyd, P and Zhang, X and Schallhorn, JM and Sugimoto, K and Leach, AG and Sapiro, G and Houk, KN},
   Title = {Shapes of antibody binding sites: qualitative and
             quantitative analyses based on a geomorphic classification
             scheme.},
   Journal = {The Journal of organic chemistry},
   Volume = {71},
   Number = {14},
   Pages = {5082-5092},
   Year = {2006},
   Month = {July},
   ISSN = {0022-3263},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/16808494},
   Abstract = {The topography of antibody binding sites has been classified
             into five types that evoke familiar geomorphic features of
             the Earth. The 229 antibody crystal structures from the
             Protein Data Bank were analyzed and classified into these
             classes. Relationships to previous topography
             classifications by Rees et al., who defined three classes,
             and Thornton et al., who defined four classes, are
             identified. An algorithm was developed to identify the
             antibody binding site class automatically based on the
             definition and the shape of the binding site. A
             three-dimensional convex hull was formed around the
             complementarity determining regions (CDRs) of the antibody.
             The convex hull was then "trimmed" to fit the binding site
             by using distance criteria and morphological techniques.
             Once the program identified the binding site shape, a
             statistical and distance based analysis was performed to
             classify automatically the antibody into one of the five
             geomorphic classes. The five antibody topography classes are
             as follows: cave (mostly hapten binders), crater (mostly
             protein and peptide/carbohydrate/nucleic acid binders),
             canyon, valley, and plain (mostly protein binders).
             Comparisons of the binding sites of empty and of complexed
             antibody binding sites gave an indication of how the shape
             of the binding site is influenced by binding of the
             antigen.},
   Doi = {10.1021/jo052659z},
   Key = {fds265001}
}

@article{fds265000,
   Author = {Yatziv, L and Sapiro, G},
   Title = {Fast image and video colorization using chrominance
             blending.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {15},
   Number = {5},
   Pages = {1120-1129},
   Year = {2006},
   Month = {May},
   ISSN = {1057-7149},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/16671293},
   Abstract = {Colorization, the task of coloring a grayscale image or
             video, involves assigning a quantity that varies in three
             dimensions, such as red, green, and blue channels, from the
             single dimension of intensity or luminance. Mapping
             between intensity and color is, therefore, not unique, and
             colorization is ambiguous in nature and requires some amount
             of human interaction or external information. A
             computationally simple, yet effective, approach to
             colorization is presented in this paper. The method is fast
             and it can be conveniently used "on the fly," permitting the
             user to interactively get the desired results promptly after
             providing a reduced set of chrominance scribbles. Based on
             the concepts of luminance-weighted chrominance blending and
             fast intrinsic distance computations, high-quality
             colorization results for still images and video are obtained
             at a fraction of the complexity and computational cost of
             previously reported techniques. Possible extensions of the
             algorithm introduced here include the capability of
             changing the colors of an existing color image or video, as
             well as changing the underlying luminance, and many other
             special effects demonstrated here.},
   Doi = {10.1109/tip.2005.864231},
   Key = {fds265000}
}
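
The blending formula at the heart of the method is simple once the intrinsic distances are available. A sketch follows; the array shapes, the decay exponent, and the exact weighting are illustrative, and the paper defines the distances through the luminance.

    import numpy as np

    def blend_chrominance(dists, chroms, b=4.0):
        """Blend scribble chrominances with weights that decay with
        intrinsic distance. dists: list of HxW distance maps, one per
        scribble color; chroms: list of length-2 chrominance vectors;
        b: decay exponent. Returns an HxWx2 chrominance image."""
        w = np.stack([1.0 / (d + 1e-8) ** b for d in dists])  # (k, H, W)
        w /= w.sum(axis=0, keepdims=True)                     # normalize
        c = np.stack(chroms).astype(float)                    # (k, 2)
        return np.einsum("khw,kc->hwc", w, c)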

@article{fds264993,
   Author = {Rathi, Y and Olver, P and Sapiro, G and Tannenbaum,
             A},
   Title = {Affine invariant surface evolutions for 3D image
             segmentation},
   Journal = {Proceedings of SPIE - The International Society for Optical
             Engineering},
   Volume = {6064},
   Publisher = {SPIE},
   Year = {2006},
   Month = {April},
   ISSN = {0277-786X},
   url = {http://dx.doi.org/10.1117/12.640282},
   Abstract = {In this paper we present an algorithm for 3D medical image
             segmentation based on an affine invariant flow. The
             algorithm is simple to implement and semi-automatic. The
             technique is based on active contours evolving in time
             according to intrinsic geometric measures of the image. The
             surface flow is obtained by minimizing a global energy with
             respect to an affine invariant metric. Affine invariant edge
             detectors for 3-dimensional objects are also computed which
             have the same qualitative behavior as the Euclidean edge
             detectors. Results on artificial and real MRI images show
             that the algorithm performs well, both in terms of accuracy
             and robustness to noise. © 2006 SPIE-IS&T.},
   Doi = {10.1117/12.640282},
   Key = {fds264993}
}

@article{fds264999,
   Author = {Yatziv, L and Bartesaghi, A and Sapiro, G},
   Title = {O(N) implementation of the fast marching
             algorithm},
   Journal = {Journal of Computational Physics},
   Volume = {212},
   Number = {2},
   Pages = {393-399},
   Publisher = {Elsevier BV},
   Year = {2006},
   Month = {March},
   url = {http://dx.doi.org/10.1016/j.jcp.2005.08.005},
   Abstract = {In this note we present an implementation of the fast
             marching algorithm for solving Eikonal equations that in
             practice reduces the original run-time from O(N log N) to
             linear. This lower run-time cost is obtained while keeping
             an error bound of the same order of magnitude as the
             original algorithm. This improvement is achieved by introducing
             the straightforward untidy priority queue, obtained via a
             quantization of the priorities in the marching computation.
             We present the underlying framework, estimations on the
             error, and examples showing the usefulness of the proposed
             approach. © 2005 Elsevier Inc. All rights
             reserved.},
   Doi = {10.1016/j.jcp.2005.08.005},
   Key = {fds264999}
}
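
The untidy priority queue is the whole trick: quantize priorities into buckets of width delta and sweep the buckets circularly, so pops are O(1) amortized instead of O(log N). A minimal sketch (class and parameter names are illustrative; in fast marching the pushed priorities always lie within a fixed band above the current minimum, which is what makes the circular indexing safe):

    import collections

    class UntidyQueue:
        """Bucketed (quantized) priority queue: pop returns an item from
        the lowest nonempty bucket, so ordering is correct up to the
        bucket width delta, which bounds the extra error."""
        def __init__(self, delta, n_buckets):
            self.delta = delta
            self.buckets = [collections.deque() for _ in range(n_buckets)]
            self.cur = 0                 # bucket currently being drained
            self.size = 0

        def push(self, priority, item):
            k = int(priority / self.delta) % len(self.buckets)
            self.buckets[k].append(item)
            self.size += 1

        def pop(self):
            if self.size == 0:
                raise IndexError("pop from empty queue")
            while not self.buckets[self.cur]:
                self.cur = (self.cur + 1) % len(self.buckets)
            self.size -= 1
            return self.buckets[self.cur].popleft()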

@article{fds264807,
   Author = {Sapiro, G},
   Title = {The intellectual professions between the State,
             entrepreneurship, and industry},
   Journal = {Le Mouvement Social},
   Volume = {no. 214},
   Number = {1},
   Pages = {3-18},
   Publisher = {CAIRN},
   Year = {2006},
   Month = {January},
   ISSN = {0027-2671},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000235640600001&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.3917/lms.214.0003},
   Key = {fds264807}
}

@article{fds264992,
   Author = {Hershkovitz, E and Sapiro, G and Tannenbaum, A and Williams,
             LD},
   Title = {Statistical analysis of RNA backbone.},
   Journal = {IEEE/ACM transactions on computational biology and
             bioinformatics},
   Volume = {3},
   Number = {1},
   Pages = {33-46},
   Year = {2006},
   Month = {January},
   ISSN = {1545-5963},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/17048391},
   Abstract = {Local conformation is an important determinant of RNA
             catalysis and binding. The analysis of RNA conformation is
             particularly difficult due to the large number of degrees of
             freedom (torsion angles) per residue. Proteins, by
             comparison, have many fewer degrees of freedom per residue.
             In this work, we use and extend classical tools from
             statistics and signal processing to search for clusters in
             RNA conformational space. Results are reported both for
             scalar analysis, where each torsion angle is separately
             studied, and for vectorial analysis, where several angles
             are simultaneously clustered. Adapting techniques from
             vector quantization and clustering to the RNA structure, we
             find torsion angle clusters and RNA conformational motifs.
             We validate the technique using well-known conformational
             motifs, showing that the simultaneous study of the total
             torsion angle space leads to results consistent with known
             motifs reported in the literature and also to the finding of
             new ones.},
   Doi = {10.1109/tcbb.2006.13},
   Key = {fds264992}
}

@article{fds264994,
   Author = {Mohan, A and Bartesaghi, A and Sapiro, G},
   Title = {Constrained regularization of digital terrain elevation
             data},
   Journal = {IEEE Geoscience and Remote Sensing Letters},
   Volume = {3},
   Number = {1},
   Pages = {59-62},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2006},
   Month = {January},
   ISSN = {1545-598X},
   url = {http://dx.doi.org/10.1109/LGRS.2005.856702},
   Abstract = {A framework for geometric regularization of elevation maps
             is introduced in this letter. The framework takes into
             account errors in the data, which form part of standard
             elevation map specifications, as well as possible
             additional user/application-dependent constraints. The
             algorithm is based on adapting the theory of geometric
             active surfaces to the problem of regularizing elevation
             maps. We present the underlying concepts and numerical
             experiments showing the effectiveness and potential of this
             theory. © 2006 IEEE.},
   Doi = {10.1109/LGRS.2005.856702},
   Key = {fds264994}
}

@article{fds370936,
   Author = {Haro, G and Randall, G and Sapiro, G},
   Title = {Stratification Learning: Detecting Mixed Density and
             Dimensionality in High Dimensional Point
             Clouds},
   Journal = {NIPS 2006: Proceedings of the 19th International Conference
             on Neural Information Processing Systems},
   Pages = {553-560},
   Year = {2006},
   Month = {January},
   ISBN = {9780262195683},
   Abstract = {The study of point cloud data sampled from a stratification,
             a collection of manifolds with possible different
             dimensions, is pursued in this paper. We present a technique
             for simultaneously soft clustering and estimating the mixed
             dimensionality and density of such structures. The framework
             is based on a maximum likelihood estimation of a Poisson
             mixture model. The presentation of the approach is completed
             with artificial and real examples demonstrating the
             importance of extending manifold learning to stratification
             learning.},
   Key = {fds370936}
}
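
The local statistic such a mixture model is typically built on is, in one common form, the Levina-Bickel nearest-neighbor dimension estimate; a brute-force sketch under that assumption (the O(n^2) distance computation and all names are illustrative, not the paper's implementation):

    import numpy as np

    def local_dimension(X, k=10):
        """Levina-Bickel style local intrinsic dimension estimate per
        point: with T_j the distance from a point to its j-th nearest
        neighbor, m = ((1/(k-1)) * sum_{j<k} log(T_k / T_j))^(-1).
        X: (n, d) array. Returns an (n,) array of local estimates that a
        mixture model can then cluster by dimension and density."""
        D = np.sqrt(((X[:, None, :] - X[None, :, :]) ** 2).sum(-1))
        D.sort(axis=1)
        T = D[:, 1:k + 1]                    # skip the zero self-distance
        logs = np.log(T[:, -1:] / T[:, :-1]) # log(T_k / T_j), j < k
        return (k - 1) / logs.sum(axis=1)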

@article{fds264750,
   Author = {Sapiro, G and Gobille, B},
   Title = {Proprietor or intellectual worker? French writers in
             search of a status.},
   Journal = {MOUVEMENT SOCIAL},
   Number = {214},
   Pages = {113-+},
   Year = {2006},
   ISSN = {0027-2671},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000235640600007&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264750}
}

@article{fds264771,
   Author = {Sapiro, G},
   Title = {The price of independence},
   Journal = {QUINZAINE LITTERAIRE},
   Number = {919},
   Pages = {6-8},
   Year = {2006},
   ISSN = {0048-6493},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000236787400003&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264771}
}

@article{fds264783,
   Author = {Sapiro, G},
   Title = {The composer as intellectual. Music and ideology in France
             (1914-1940)},
   Journal = {QUINZAINE LITTERAIRE},
   Number = {926},
   Pages = {26-26},
   Year = {2006},
   ISSN = {0048-6493},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000239293900024&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264783}
}

@article{fds264796,
   Author = {Sapiro, G},
   Title = {Emigrants in New York. The French intellectuals in Manhattan
             (1940-1944).},
   Journal = {MOUVEMENT SOCIAL},
   Number = {214},
   Pages = {174-175},
   Year = {2006},
   ISSN = {0027-2671},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000235640600013&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264796}
}

@article{fds264822,
   Author = {Sapiro, G},
   Title = {Intellectuals. Gender in the history of
             intellectuals.},
   Journal = {MOUVEMENT SOCIAL},
   Number = {214},
   Pages = {168-170},
   Year = {2006},
   ISSN = {0027-2671},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000235640600010&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264822}
}

@article{fds264851,
   Author = {Hershkovitz, E and Sapiro, G and Tannenbaum, AR and Williams,
             LD},
   Title = {Statistical Analysis of RNA Backbone.},
   Journal = {IEEE ACM Trans. Comput. Biol. Bioinform.},
   Volume = {3},
   Pages = {33-46},
   Year = {2006},
   url = {http://dx.doi.org/10.1145/1113896.1113900},
   Doi = {10.1145/1113896.1113900},
   Key = {fds264851}
}

@article{fds264984,
   Author = {Breen, D and Kirby, M and Lefohn, A and Museth, K and Preusser, T and Sapiro, G and Whitaker, R},
   Title = {Level set and PDE methods for visualization},
   Journal = {Proceedings of the IEEE Visualization Conference},
   Pages = {125},
   Publisher = {IEEE},
   Year = {2005},
   Month = {December},
   url = {http://dx.doi.org/10.1109/VIS.2005.65},
   Abstract = {Level set methods, an important class of partial
             differential equation (PDE) methods, define dynamic surfaces
             implicitly as the level set (iso-surface) of a sampled,
             evolving nD function. This course is aimed at
             researchers interested in learning about level set and other
             PDE-based methods, and their application to visualization.
             The course material will be presented by several of the
             recognized experts in the field, and will include
             introductory concepts, practical considerations and
             extensive details on a variety of level set/PDE
             applications. The course will begin with preparatory
             material that introduces the concept of using partial
             differential equations to solve problems in visualization.
             This will include the structure and behavior of several
             different types of differential equations, e.g. the level
             set, heat and reaction-diffusion equations, as well as a
             general approach to developing PDE-based applications. The
             second stage of the course will describe the numerical
             methods and algorithms needed to implement the mathematics
             and methods presented in the first stage, including
             information on implementing the algorithms on GPUs.
             Throughout the course the technical material will be tied to
             applications, e.g. image processing, geometric modeling,
             dataset segmentation, model processing, surface
             reconstruction, anisotropic geometric diffusion, flow field
             post-processing and vector visualization.},
   Doi = {10.1109/VIS.2005.65},
   Key = {fds264984}
}

@article{fds264985,
   Author = {Sapiro, G},
   Title = {Inpainting the colors},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Volume = {2},
   Pages = {698-701},
   Publisher = {IEEE},
   Year = {2005},
   Month = {December},
   ISSN = {1522-4880},
   url = {http://dx.doi.org/10.1109/ICIP.2005.1530151},
   Abstract = {A framework for automatic image colorization, the art of
             adding color to a monochrome image or movie, is presented in
             this paper. The approach is based on considering the
             geometry and structure of the monochrome luminance input,
             given by its gradient information, as representing the
             geometry and structure of the whole colored version. The
             color is then obtained by solving a partial differential
             equation that propagates a few color scribbles provided by
             the user or by side information, while considering the
             gradient information brought in by the monochrome data. This
             way, the color is inpainted, constrained both by the
             monochrome image geometry and the provided color samples. We
             present the underlying framework and examples for still
             images and movies. © 2005 IEEE.},
   Doi = {10.1109/ICIP.2005.1530151},
   Key = {fds264985}
}
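
A minimal stand-in for the propagation step, assuming a simple edge-stopping diffusion rather than the paper's exact PDE: chrominance is diffused with a conductance that vanishes across strong luminance edges, keeping scribbled pixels fixed. All names and parameters are illustrative.

    import numpy as np

    def propagate_color(lum, chrom, known, iters=500, k=0.02):
        """lum: HxW float luminance; chrom: HxWx2 float chrominance,
        valid at scribbles; known: HxW bool mask of scribbled pixels.
        Note the np.roll neighbors wrap around the image borders; this
        is a sketch only."""
        c = chrom.astype(float).copy()
        gy, gx = np.gradient(lum)
        g = 1.0 / (1.0 + (gx ** 2 + gy ** 2) / k ** 2)  # edge-stopping weight
        for _ in range(iters):
            avg = np.zeros_like(c)
            wsum = np.zeros_like(lum)
            for ax, sh in ((0, 1), (0, -1), (1, 1), (1, -1)):
                w = np.roll(g, sh, axis=ax)
                avg += w[..., None] * np.roll(c, sh, axis=ax)
                wsum += w
            c = avg / wsum[..., None]
            c[known] = chrom[known]       # re-impose the scribbles
        return c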

@article{fds264986,
   Author = {Bartesaghi, A and Sapiro, G},
   Title = {Tracking of moving objects under severe and total
             occlusions},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Volume = {1},
   Pages = {301-304},
   Publisher = {IEEE},
   Year = {2005},
   Month = {December},
   ISSN = {1522-4880},
   url = {http://dx.doi.org/10.1109/ICIP.2005.1529747},
   Abstract = {We present an algorithm for tracking moving objects using
             intrinsic minimal surfaces which handles particularly well
             the presence of severe and total occlusions even in the
             presence of weak object boundaries. We adopt an edge based
             approach and find the segmentation as a minimal surface in
             3D space-time, the metric being dictated by the image
             gradient. Object boundaries are represented implicitly as
             the level set of a higher dimensional function, and no
             particular object model is assumed. We also avoid explicit
             estimation of a dynamic model since the problem is regarded
             as one of static energy minimization. A set of interior
             points provided by the user is used to constrain the
             optimization, which basically corresponds to selecting the
             object of interest within the video sequence. The
             constraints are such that they restrict the resulting
             surface to be star-shaped in the 3D spatio-temporal space.
             We present some challenging examples that show the
             robustness of the technique. © 2005 IEEE.},
   Doi = {10.1109/ICIP.2005.1529747},
   Key = {fds264986}
}

@article{fds264987,
   Author = {Patwardhan, KA and Sapiro, G and Bertalmio, M},
   Title = {Video inpainting of occluding and occluded
             objects},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Volume = {2},
   Pages = {69-72},
   Publisher = {IEEE},
   Year = {2005},
   Month = {December},
   ISSN = {1522-4880},
   url = {http://dx.doi.org/10.1109/ICIP.2005.1529993},
   Abstract = {We present a basic technique to fill in missing parts of a
             video sequence taken from a static camera. Two important
             cases are considered. The first case is concerned with the
             removal of non-stationary objects that occlude stationary
             background. We use a priority based spatio-temporal
             synthesis scheme for inpainting the stationary background.
             The second and more difficult case involves filling in
             moving objects when they are partially occluded. For this,
             we propose a priority scheme to first inpaint the occluded
             moving objects and then fill-in the remaining area with
             stationary background using the method proposed for the
             first case. We use as input an optical-flow based mask,
             which indicates whether an undamaged pixel is moving or
             stationary. The moving object is inpainted by copying
             patches from undamaged frames, and this copying is
             independent of the background of the moving object in either
             frame. This work has applications in a variety of different
             areas, including video special effects and restoration and
             enhancement of damaged videos. The examples shown in the
             paper illustrate these ideas. © 2005 IEEE.},
   Doi = {10.1109/ICIP.2005.1529993},
   Key = {fds264987}
}

@article{fds264990,
   Author = {Martín, A and Sapiro, G and Seroussi, G},
   Title = {Is image steganography natural?},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {14},
   Number = {12},
   Pages = {2040-2050},
   Year = {2005},
   Month = {December},
   ISSN = {1057-7149},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/16370457},
   Abstract = {Steganography is the art of secret communication. Its
             purpose is to hide the presence of information, using, for
             example, images as covers. We experimentally investigate if
             stego-images, bearing a secret message, are statistically
             "natural." For this purpose, we use recent results on the
             statistics of natural images and investigate the effect of
             some popular steganography techniques. We found that these
             fundamental statistics of natural images are, in fact,
             generally altered by the hidden "nonnatural" information.
             Frequently, the change is consistently biased in a given
             direction. However, for the class of natural images
             considered, the change generally falls within the intrinsic
             variability of the statistics, and, thus, does not allow for
             reliable detection, unless knowledge of the data hiding
             process is taken into account. In the latter case,
             significant levels of detection are demonstrated.},
   Doi = {10.1109/tip.2005.859370},
   Key = {fds264990}
}

@article{fds264991,
   Author = {Mahmoudi, M and Sapiro, G},
   Title = {Fast image and video denoising via nonlocal means of similar
             neighborhoods},
   Journal = {IEEE Signal Processing Letters},
   Volume = {12},
   Number = {12},
   Pages = {839-842},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2005},
   Month = {December},
   ISSN = {1070-9908},
   url = {http://dx.doi.org/10.1109/LSP.2005.859509},
   Abstract = {In this letter, improvements to the nonlocal means image
             denoising method introduced by Buades et al. are presented.
             The original nonlocal means method replaces a noisy pixel by
             the weighted average of pixels with related surrounding
             neighborhoods. While producing state-of-the-art denoising
             results, this method is computationally impractical. In
             order to accelerate the algorithm, we introduce filters that
             eliminate unrelated neighborhoods from the weighted average.
             These filters are based on local average gray values and
             gradients, preclassifying neighborhoods and thereby reducing
             the original quadratic complexity to a linear one and
             reducing the influence of less-related areas in the
             denoising of a given pixel. We present the underlying
             framework and experimental results for gray level and color
             images as well as for video. © 2005 IEEE.},
   Doi = {10.1109/LSP.2005.859509},
   Key = {fds264991}
}
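
The acceleration is a pre-filter: before a neighborhood enters the weighted average, its local mean (and, in the paper, also its local gradient) must be close to the target's. A direct, unoptimized sketch under those assumptions; the gradient test is omitted and all names and defaults are illustrative.

    import numpy as np

    def nlm_with_preselection(img, patch=3, search=7, h=0.1, mean_tol=0.05):
        """Nonlocal means with neighborhood preselection: candidates
        whose local average gray value differs from the target's by more
        than mean_tol are skipped. img: 2D float array in [0, 1]."""
        H, W = img.shape
        r, s = patch // 2, search // 2
        pad = np.pad(img, r, mode="reflect")
        # Local mean over the patch window, for every pixel.
        means = np.stack([pad[y:y + H, x:x + W]
                          for y in range(patch)
                          for x in range(patch)]).mean(axis=0)
        out = np.zeros_like(img, dtype=float)
        for y in range(H):
            for x in range(W):
                p = pad[y:y + patch, x:x + patch]
                num = den = 0.0
                for ny in range(max(0, y - s), min(H, y + s + 1)):
                    for nx in range(max(0, x - s), min(W, x + s + 1)):
                        if abs(means[ny, nx] - means[y, x]) > mean_tol:
                            continue  # pre-filter: unrelated neighborhood
                        q = pad[ny:ny + patch, nx:nx + patch]
                        w = np.exp(-((p - q) ** 2).sum() / h ** 2)
                        num += w * img[ny, nx]
                        den += w
                out[y, x] = num / den
        return out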

@article{fds264982,
   Author = {Mémoli, F and Sapiro, G},
   Title = {Distance functions and geodesics on submanifolds of ℝ^d
             and point clouds},
   Journal = {SIAM Journal on Applied Mathematics},
   Volume = {65},
   Number = {4},
   Pages = {1227-1260},
   Publisher = {Society for Industrial & Applied Mathematics
             (SIAM)},
   Year = {2005},
   Month = {September},
   ISSN = {0036-1399},
   url = {http://dx.doi.org/10.1137/S003613990342877X},
   Abstract = {A theoretical and computational framework for computing
             intrinsic distance functions and geodesics on submanifolds
             of ℝ^d given by point clouds is introduced and developed
             in this paper. The basic idea is that, as shown here,
             intrinsic distance functions and geodesics on general
             co-dimension submanifolds of ℝ^d can be accurately
             approximated by extrinsic Euclidean ones computed inside a
             thin offset band surrounding the manifold. This permits the
             use of computationally optimal algorithms for computing
             distance functions in Cartesian grids. We use these
             algorithms, modified to deal with spaces with boundaries,
             and obtain a computationally optimal approach also for the
             case of intrinsic distance functions on submanifolds of
             ℝ^d. For point clouds, the offset band is constructed without
             the need to explicitly find the underlying manifold, thereby
             computing intrinsic distance functions and geodesics on
             point clouds while skipping the manifold reconstruction
             step. The case of point clouds representing noisy samples of
             a submanifold of Euclidean space is studied as well. All the
             underlying theoretical results are presented along with
             experimental examples for diverse applications and
             comparisons to graph-based distance algorithms. © 2005
             Society for Industrial and Applied Mathematics.},
   Doi = {10.1137/S003613990342877X},
   Key = {fds264982}
}
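
The offset-band construction is concrete enough to sketch in 2D: rasterize the cloud onto a grid, keep only cells within r of a sample, and run a grid shortest-path solver inside that band. Below, a Dijkstra stand-in replaces the optimal Cartesian solvers the paper uses; names, grid resolution, and r are illustrative, and the grid spacing must be finer than r.

    import heapq
    import numpy as np

    def band_distance(points, source_idx, r=0.05, n=64):
        """Distance map from one sample, computed only inside the offset
        band (grid cells within r of some sample). points: (P, 2) float
        array. Returns an (n, n) array, np.inf outside the band."""
        lo = points.min(axis=0) - 2 * r
        hi = points.max(axis=0) + 2 * r
        xs = np.linspace(lo[0], hi[0], n)
        ys = np.linspace(lo[1], hi[1], n)
        grid = np.stack(np.meshgrid(xs, ys, indexing="ij"), axis=-1)
        d2 = ((grid[:, :, None, :] - points[None, None, :, :]) ** 2).sum(-1)
        band = d2.min(axis=-1) <= r * r
        step = xs[1] - xs[0]     # approximate cell size (sketch only)
        src = tuple(np.unravel_index(
            np.argmin(((grid - points[source_idx]) ** 2).sum(-1)), (n, n)))
        dist = np.full((n, n), np.inf)
        dist[src] = 0.0
        heap = [(0.0, src)]
        while heap:
            d, (i, j) = heapq.heappop(heap)
            if d > dist[i, j]:
                continue
            for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                ni, nj = i + di, j + dj
                if 0 <= ni < n and 0 <= nj < n and band[ni, nj]:
                    nd = d + step
                    if nd < dist[ni, nj]:
                        dist[ni, nj] = nd
                        heapq.heappush(heap, (nd, (ni, nj)))
        return dist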

@article{fds264989,
   Author = {Bartesaghi, A and Sapiro, G and Subramaniam, S},
   Title = {An energy-based three-dimensional segmentation approach for
             the quantitative interpretation of electron
             tomograms.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {14},
   Number = {9},
   Pages = {1314-1323},
   Year = {2005},
   Month = {September},
   ISSN = {1057-7149},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/16190467},
   Abstract = {Electron tomography allows for the determination of the
             three-dimensional structures of cells and tissues at
             resolutions significantly higher than that which is possible
             with optical microscopy. Electron tomograms contain, in
             principle, vast amounts of information on the locations and
             architectures of large numbers of subcellular assemblies and
             organelles. The development of reliable quantitative
             approaches for the analysis of features in tomograms is an
             important problem, and a challenging prospect due to the low
             signal-to-noise ratios that are inherent to biological
             electron microscopic images. This is, in part, a consequence
             of the tremendous complexity of biological specimens. We
             report on a new method for the automated segmentation of HIV
             particles and selected cellular compartments in electron
             tomograms recorded from fixed, plastic-embedded sections
             derived from HIV-infected human macrophages. Individual
             features in the tomogram are segmented using a novel robust
             algorithm that finds their boundaries as global minimal
             surfaces in a metric space defined by image features. The
             optimization is carried out in a transformed spherical
             domain with the center an interior point of the particle of
             interest, providing a proper setting for the fast and
             accurate minimization of the segmentation energy. This
             method provides tools for the semi-automated detection and
             statistical evaluation of HIV particles at different stages
             of assembly in the cells and presents opportunities for
             correlation with biochemical markers of HIV infection. The
             segmentation algorithm developed here forms the basis of the
             automated analysis of electron tomograms and will be
             especially useful given the rapid increases in the rate of
             data acquisition. It could also enable studies of much
             larger data sets, such as those which might be obtained from
             the tomographic analysis of HIV-infected cells from studies
             of large populations.},
   Doi = {10.1109/tip.2005.852467},
   Key = {fds264989}
}

@article{fds264988,
   Author = {Bartesaghi, A and Sapiro, G and Malzbender, T and Gelb,
             D},
   Title = {Three-dimensional shape rendering from multiple
             images},
   Journal = {Graphical Models},
   Volume = {67},
   Number = {4},
   Pages = {332-346},
   Publisher = {Elsevier BV},
   Year = {2005},
   Month = {July},
   url = {http://dx.doi.org/10.1016/j.gmod.2005.02.002},
   Abstract = {A paradigm for automatic three-dimensional shape and
             geometry rendering from multiple images is introduced in
             this paper. In particular, non-photorealistic rendering
             (NPR) techniques in the style of pen-and-ink illustrations
             are addressed, while the underlying presented ideas can be
             used in other modalities, such as halftoning, as well.
             Existing NPR approaches can be categorized in two groups
             depending on the type of input they use: image based and
             object based. Using multiple images as input to the NPR
             scheme, we propose a novel hybrid model that simultaneously
             uses information from the image and object domains. The
             benefit not only comes from combining the features of each
             approach, it also minimizes the need for manual or user
             assisted tasks in extracting scene features and geometry, as
             employed in virtually all state-of-the-art NPR approaches.
             As particular examples we use input images from binocular
             stereo and multiple-light photometric stereo systems. From
             the image domain we extract the tonal information to be
             mimicked by the NPR synthesis algorithm, and from the object
             domain we extract the geometry, mainly principal directions,
             obtained from the image set without explicitly using 3D
             models, to convey shape to the drawings. We describe a
             particular implementation of such a hybrid system and
             present a number of automatically generated pen-and-ink
             style drawings. This work then shows how to use and extend
             well-developed techniques in computer vision to address
             fundamental problems in shape representation and rendering.
             © 2005 Elsevier Inc. All rights reserved.},
   Doi = {10.1016/j.gmod.2005.02.002},
   Key = {fds264988}
}

@article{Memoli2005,
   Author = {Mémoli, F and Sapiro, G},
   Title = {A theoretical and computational framework for isometry
             invariant recognition of point cloud data},
   Journal = {Foundations of Computational Mathematics},
   Volume = {5},
   Number = {3},
   Pages = {313-347},
   Publisher = {Springer Nature},
   Year = {2005},
   Month = {July},
   ISSN = {1615-3375},
   url = {http://dx.doi.org/10.1007/s10208-004-0145-y},
   Abstract = {Point clouds are one of the most primitive and fundamental
             manifold representations. Popular sources of point clouds
             are three-dimensional shape acquisition devices such as
             laser range scanners. Another important field where point
             clouds are found is in the representation of
             high-dimensional manifolds by samples. With the increasing
             popularity and very broad applications of this source of
             data, it is natural and important to work directly with this
             representation, without having to go through the
             intermediate and sometimes impossible and distorting steps
             of surface reconstruction. A geometric framework for
             comparing manifolds given by point clouds is presented in
             this paper. The underlying theory is based on
             Gromov-Hausdorff distances, leading to isometry invariant
             and completely geometric comparisons. This theory is
             embedded in a probabilistic setting as derived from random
             sampling of manifolds, and then combined with results on
             matrices of pairwise geodesic distances to lead to a
             computational implementation of the framework. The
             theoretical and computational results presented here are
             complemented with experiments for real three-dimensional
             shapes. © 2005 SFoCM.},
   Doi = {10.1007/s10208-004-0145-y},
   Key = {Memoli2005}
}
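
On finite samples the framework boils down to comparing matrices of pairwise geodesic distances; the basic quantity is the metric distortion of a candidate correspondence. A sketch of that quantity alone (searching over correspondences, the hard combinatorial part, is not attempted, and the names are illustrative):

    import numpy as np

    def correspondence_distortion(DX, DY, f):
        """Metric distortion of a correspondence between two sampled
        shapes. DX, DY: (n, n) and (m, m) pairwise geodesic distance
        matrices; f: length-n integer array mapping X-indices to
        Y-indices. Half the distortion, minimized over correspondences,
        bounds the Gromov-Hausdorff distance used for isometry invariant
        comparison."""
        return float(np.abs(DX - DY[np.ix_(f, f)]).max())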

@article{fds264731,
   Author = {Caselles, V and Kimmel, R and Sapiro, G},
   Title = {Geometric Active Contours for Image Segmentation},
   Pages = {613-627},
   Publisher = {Elsevier},
   Year = {2005},
   Month = {January},
   url = {http://dx.doi.org/10.1016/B978-012119792-6/50099-1},
   Abstract = {This chapter deals with an efficient and accurate approach
             in image segmentation: active contours. The general idea
             behind this technique is to apply partial differential
             equations (PDEs) to deform a curve or a surface toward the
             boundaries of the objects of interest in the image. The
             deformation is driven by the forces that use information
             about the objects of interest in the data. In particular,
             this chapter describes the ideas that have emerged from the
             geodesic active contours framework, focusing on some of the
             main models and referring to the literature for other
             applications. This is an example of using PDEs for image
             processing and analysis. In this case, such PDEs are derived
             as gradient-descent processes from geometric integral
             measures. This research field considers images as continuous
             geometric structures and enables the use of continuous
             mathematics such as PDEs and differential geometry. The
             chapter also discusses the computer image processing
             algorithms that are actually the numeric implementations of
             the resulting equations.},
   Doi = {10.1016/B978-012119792-6/50099-1},
   Key = {fds264731}
}

@article{fds264803,
   Author = {Bartesaghi, A and Sapiro, G},
   Title = {Tracking of moving objects under severe and total
             occlusions},
   Journal = {2005 International Conference on Image Processing (ICIP),
             Vols 1-5},
   Pages = {249-252},
   Publisher = {IEEE},
   Year = {2005},
   Month = {January},
   ISBN = {0-7803-9134-9},
   ISSN = {1522-4880},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000235773300063&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264803}
}

@article{fds264802,
   Author = {Sapiro, G},
   Title = {Inpainting the colors},
   Journal = {2005 International Conference on Image Processing (ICIP),
             Vols 1-5},
   Pages = {1265-1268},
   Year = {2005},
   ISBN = {0-7803-9134-9},
   ISSN = {1522-4880},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000235773301123&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264802}
}

@article{fds264813,
   Author = {Patwardhan, KA and Sapiro, G and Bertalmio, M},
   Title = {Video inpainting of occluding and occluded
             objects},
   Journal = {2005 International Conference on Image Processing (ICIP),
             Vols 1-5},
   Pages = {1593-1596},
   Year = {2005},
   ISBN = {0-7803-9134-9},
   ISSN = {1522-4880},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000235773302011&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264813}
}

@article{fds264974,
   Author = {Bartesaghi, A and Sapiro, G and Lee, S and Lefman, J and Wahl, S and Orenstein, J and Subramaniam, S},
   Title = {A new approach for 3D segmentation of cellular tomograms
             obtained using three-dimensional electron
             microscopy},
   Journal = {2004 2nd IEEE International Symposium on Biomedical Imaging:
             Macro to Nano},
   Volume = {1},
   Pages = {5-8},
   Year = {2004},
   Month = {December},
   Abstract = {Electron tomography allows determination of the
             three-dimensional structures of cells and tissues at
             resolutions significantly higher than is possible with
             optical microscopy. Electron tomograms contain, in
             principle, vast amounts of information on the locations and
             architectures of large numbers of subcellular assemblies and
             organelles. The development of reliable quantitative
             approaches for interpretation of features in tomograms is
             an important problem, but is a challenging prospect because
             of the low signal-to-noise ratios that are inherent to
             biological electron microscopic images. As a first step in
             this direction, we report methods for the automated
             statistical analysis of HIV particles and selected cellular
             compartments in electron tomograms recorded from fixed,
             plastic-embedded sections derived from HIV-infected human
             macrophages. Individual features in the tomogram are
             segmented using a novel, robust algorithm that finds their
             boundaries as global minimal surfaces in a metric space
             defined by image features. Our expectation is that such
             methods will provide tools for semi-automated detection and
             statistical evaluation of HIV particles at different stages
             of assembly in the cells, and present opportunities for
             correlation with biochemical markers of HIV infection.
             ©2004 IEEE.},
   Key = {fds264974}
}
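
A 2D minimal-path analog of the global minimal-surface idea above, as a hedged sketch: assume an arrival-time map T has been computed from a seed in a metric that is cheap to cross where the image-feature response is strong (e.g., by Dijkstra or fast marching); a boundary path is then traced by steepest descent on T. The paper's genuinely 3D minimal-surface computation is not reproduced here.

import numpy as np

def backtrack(T, start):
    """Trace the steepest-descent path on arrival-time map T from `start`
    back toward the seed (where T = 0)."""
    path, (i, j) = [start], start
    while T[i, j] > 0:
        nbrs = [(i + di, j + dj)
                for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1))
                if 0 <= i + di < T.shape[0] and 0 <= j + dj < T.shape[1]]
        ni, nj = min(nbrs, key=lambda p: T[p])
        if T[ni, nj] >= T[i, j]:             # plateau or local minimum: stop
            break
        i, j = ni, nj
        path.append((i, j))
    return path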

@article{fds264975,
   Author = {Yatziv, L and Sapiro, G and Levoy, M},
   Title = {Lightfield completion},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Volume = {3},
   Pages = {1787-1790},
   Publisher = {IEEE},
   Year = {2004},
   Month = {December},
   ISSN = {1522-4880},
   url = {http://dx.doi.org/10.1109/ICIP.2004.1421421},
   Abstract = {A light field is a 4D function representing radiance as a
             function of ray position and direction in 3D space. In this
             paper we describe a method for recovering gaps in light
             fields of scenes that contain significant occluders. In
             these situations, although a large fraction of the scene may
             be blocked in any one view, most scene points are visible in
             at least some views. As a consequence, although too much
             information is missing to employ 2D completion methods that
             operate within a single view, it may be possible to recover
             the lost information by completion in 4D, the full
             dimensionality of the light field. The proposed light field
             completion method has three main steps: registration,
             initial estimation, and high-dimensional texture synthesis
             and/or inpainting. At the registration stage, the images
             are shifted and re-projected so that the corresponding
             pixels from different images are aligned in the
             reconstructed light field. Following this, the estimation
             step uses a naive technique to fill in parts of the gaps
             using the available information from the multiple
             images. This serves as the initial condition for the next
             and last step, where the missing information is recovered
             via high dimensional texture synthesis and/or inpainting.
             These two steps of initial condition and completion are
             iterated. The algorithm is illustrated with real examples.
             © 2004 IEEE.},
   Doi = {10.1109/ICIP.2004.1421421},
   Key = {fds264975}
}
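
The "naive technique" used as the initial condition above can be as simple as a per-pixel vote across views. A sketch, assuming registered views stacked as a (V, H, W) array with a boolean visibility mask of the same shape (array layout and names are assumptions):

import numpy as np

def initial_estimate(views, known):
    """Fill gap pixels with the median over the views where the scene point
    is visible; pixels visible in no view are left for the later texture
    synthesis / inpainting stage."""
    data = np.where(known, views.astype(float), np.nan)
    fill = np.nanmedian(data, axis=0)        # (H, W); NaN where never seen
    out = views.astype(float)
    gaps = ~known & ~np.isnan(fill)[None, :, :]
    out[gaps] = np.broadcast_to(fill, views.shape)[gaps]
    return out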

@article{fds264976,
   Author = {Patwardhan, KA and Sapiro, G},
   Title = {Automatic image decomposition},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Volume = {1},
   Pages = {645-648},
   Publisher = {IEEE},
   Year = {2004},
   Month = {December},
   ISSN = {1522-4880},
   url = {http://dx.doi.org/10.1109/ICIP.2004.1418837},
   Abstract = {The decomposition of an image into its primitive components,
             such as cartoon plus texture, is a fundamental problem in
             image processing. In [11, 16], the authors proposed a
             technique to achieve this decomposition into structure and
             texture. These two components are competing ones, and their
             proposed model has a critical parameter that controls this
             decomposition. In this paper we show how to automatically
             select this parameter, and demonstrate with examples the
             importance of this optimal selection. ©2004
             IEEE.},
   Doi = {10.1109/ICIP.2004.1418837},
   Key = {fds264976}
}
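
The critical parameter mentioned above controls how much of the image goes into the structure part versus the texture part. As an illustrative stand-in for the variational model of the paper, the sketch below uses a Gaussian low-pass split, with the smoothing scale playing the role of that parameter and a crude automatic selection rule; the names, tolerance, and the split itself are assumptions.

import numpy as np
from scipy.ndimage import gaussian_filter

def decompose(f, scale):
    u = gaussian_filter(f.astype(float), sigma=scale)   # structure (cartoon)
    v = f.astype(float) - u                             # texture + noise
    return u, v

def select_scale(f, scales, tol=0.01):
    """Pick the first scale at which the texture part stops gaining energy."""
    prev = None
    for s in scales:
        _, v = decompose(f, s)
        energy = float(np.linalg.norm(v))
        if prev is not None and prev > 0 and (energy - prev) / prev < tol:
            return s
        prev = energy
    return scales[-1]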

@article{fds264977,
   Author = {Mémoli, F and Sapiro, G},
   Title = {Comparing point clouds},
   Journal = {ACM International Conference Proceeding Series},
   Volume = {71},
   Pages = {32-40},
   Publisher = {ACM Press},
   Year = {2004},
   Month = {December},
   url = {http://dx.doi.org/10.1145/1057432.1057436},
   Abstract = {Point clouds are one of the most primitive and fundamental
             surface representations. A popular source of point clouds
             is three-dimensional shape acquisition devices such as
             laser range scanners. Another important field where point
             clouds are found is in the representation of
             high-dimensional manifolds by samples. With the increasing
             popularity and very broad applications of this source of
             data, it is natural and important to work directly with this
             representation, without having to go through the intermediate and
             sometimes impossible and distorting steps of surface
             reconstruction. A geometric framework for comparing
             manifolds given by point clouds is presented in this paper.
             The underlying theory is based on Gromov-Hausdorff
             distances, leading to isometry invariant and completely
             geometric comparisons. This theory is embedded in a
             probabilistic setting as derived from random sampling of
             manifolds, and then combined with results on matrices of
             pairwise geodesic distances to lead to a computational
             implementation of the framework. The theoretical and
             computational results here presented are complemented with
             experiments for real three dimensional shapes. © The
             Eurographics Association 2004.},
   Doi = {10.1145/1057432.1057436},
   Key = {fds264977}
}
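
In the computational part of the framework above, each shape is reduced to its matrix of pairwise geodesic distances. The sketch below captures the flavor with plain Euclidean distances and computes only a classical, easily verified lower bound on the Gromov-Hausdorff distance; it is not the paper's full estimator.

import numpy as np

def pairwise(X):
    """X: (n, d) point cloud -> (n, n) matrix of Euclidean distances."""
    diff = X[:, None, :] - X[None, :, :]
    return np.sqrt((diff ** 2).sum(axis=-1))

def gh_lower_bound(X, Y):
    """d_GH(X, Y) >= |diam(X) - diam(Y)| / 2."""
    return abs(pairwise(X).max() - pairwise(Y).max()) / 2.0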

@article{fds264978,
   Author = {Bartesaghi, A and Sapiro, G and Malzbender, T and Gelb,
             D},
   Title = {Non-photorealistic rendering from multiple
             images},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Volume = {4},
   Pages = {2403-2406},
   Publisher = {IEEE},
   Year = {2004},
   Month = {December},
   ISSN = {1522-4880},
   url = {http://dx.doi.org/10.1109/ICIP.2004.1421585},
   Abstract = {A new paradigm for automatic non-photorealistic rendering
             (NPR) is introduced in this paper. Existing NPR approaches
             can be categorized in two groups depending on the type of
             input they use: image based and object based. Using multiple
             images as input to the NPR scheme, we propose a novel hybrid
             model that simultaneously uses information from the image
             and object domains. The benefit comes not only from
             combining the features of each approach but, most
             importantly, from minimizing the need for manual or
             user-assisted tasks in extracting scene features and
             geometry, as employed in virtually all state-of-the-art NPR
             approaches. We describe a particular implementation of such
             a hybrid system and present a number of automatically
             generated pen-and-ink style drawings. This work then shows
             how to use and extend well-developed techniques in computer
             vision to address
             fundamental problems in image representation and rendering.
             ©2004 IEEE.},
   Doi = {10.1109/ICIP.2004.1421585},
   Key = {fds264978}
}

@article{fds264983,
   Author = {Niethammer, M and Betelu, S and Sapiro, G and Tannenbaum, A and Giblin,
             PJ},
   Title = {Area-Based Medial Axis of Planar Curves.},
   Journal = {International journal of computer vision},
   Volume = {60},
   Number = {3},
   Pages = {203-224},
   Year = {2004},
   Month = {December},
   url = {http://dx.doi.org/10.1023/b:visi.0000036835.28674.d0},
   Abstract = {A new definition of affine invariant medial axis of planar
             closed curves is introduced. A point belongs to the affine
             medial axis if and only if it is equidistant from at least
             two points of the curve, with the distance being a minimum
             and given by the areas between the curve and its
             corresponding chords. The medial axis is robust, eliminating
             the need for curve denoising. In a dynamical interpretation
             of this affine medial axis, the medial axis points are the
             affine shock positions of the affine erosion of the curve.
             We propose a simple method to compute the medial axis and
             give examples. We also demonstrate how to use this method to
             detect affine skew symmetry in real images.},
   Doi = {10.1023/b:visi.0000036835.28674.d0},
   Key = {fds264983}
}

@article{fds264979,
   Author = {Solé, A and Caselles, V and Sapiro, G and Arándiga,
             F},
   Title = {Morse description and geometric encoding of digital
             elevation maps.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {13},
   Number = {9},
   Pages = {1245-1262},
   Year = {2004},
   Month = {September},
   ISSN = {1057-7149},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/15449586},
   Abstract = {Two complementary geometric structures for the topographic
             representation of an image are developed in this work. The
             first one computes a description of the Morse-topological
             structure of the image, while the second one computes a
             simplified version of its drainage structure. The
             topographic significance of the Morse and drainage
             structures of digital elevation maps (DEMs) suggests that
             they can be used as the basis of an efficient encoding
             scheme. As an application, we combine this geometric
             representation with an interpolation algorithm and lossless
             data compression schemes to develop a compression scheme for
             DEMs. This algorithm achieves high compression while
             controlling the maximum error in the decoded elevation map,
             a property that is necessary for the majority of
             applications dealing with DEMs. We present the underlying
             theory and compression results for standard DEM
             data.},
   Doi = {10.1109/tip.2004.832864},
   Key = {fds264979}
}
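
The maximum-error control mentioned in the abstract has a standard mechanism worth spelling out: if the residuals against the interpolated (Morse/drainage-based) prediction are quantized with step 2*eps, the decoded elevations are guaranteed to lie within eps of the originals. A sketch, with the geometric prediction itself left abstract:

import numpy as np

def encode_residual(dem, prediction, eps):
    """Quantize residuals with step 2*eps; the integers q are what gets
    entropy coded."""
    return np.round((dem - prediction) / (2.0 * eps)).astype(np.int64)

def decode(prediction, q, eps):
    """Reconstruction error is bounded: |decode(...) - dem| <= eps."""
    return prediction + 2.0 * eps * q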

@article{fds264980,
   Author = {Tsai, YHR and Cheng, LT and Osher, S and Burchard, P and Sapiro,
             G},
   Title = {Visibility and its dynamics in a PDE based implicit
             framework},
   Journal = {Journal of Computational Physics},
   Volume = {199},
   Number = {1},
   Pages = {260-290},
   Publisher = {Elsevier BV},
   Year = {2004},
   Month = {September},
   url = {http://dx.doi.org/10.1016/j.jcp.2004.02.015},
   Abstract = {We investigate the problem of determining visible regions
             given a set of (moving) obstacles and a (moving) vantage
             point. Our approach to this problem is through an implicit
             framework, where the obstacles are represented by a level
             set function. The visibility problem is formally formulated
             as a boundary value problem (BVP) of a first order partial
             differential equation. It is based on the continuation of
             values along the given ray field. We propose a one-pass,
             multi-level algorithm for the construction of the solution
             on a grid. Furthermore, we study the dynamics of shadow
             boundaries on the surfaces of the obstacles when the vantage
             point moves along a given trajectory. In all of these
             situations, topological changes such as merging and breaking
             occur in the regions of interest. These are automatically
             handled by the level set framework proposed here. Finally,
             we obtain additional useful information through simple
             operations in the level set framework. © 2004 Elsevier Inc.
             All rights reserved.},
   Doi = {10.1016/j.jcp.2004.02.015},
   Key = {fds264980}
}
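
The implicit formulation above admits a very direct (if brute-force) test: with the obstacles given as the region where a level-set function phi is non-positive, a point x is visible from the vantage point x0 exactly when the minimum of phi along the segment from x0 to x is positive. This sketch samples the segment instead of using the paper's one-pass sweeping solver:

import numpy as np

def visible(phi, x0, x, n_samples=200):
    """phi: callable returning the obstacle level-set value at a point."""
    ts = np.linspace(0.0, 1.0, n_samples)
    pts = x0[None, :] + ts[:, None] * (x - x0)[None, :]
    return min(phi(p) for p in pts) > 0.0

disk = lambda p: np.linalg.norm(p) - 1.0            # unit-disk obstacle
print(visible(disk, np.array([3.0, 0.0]), np.array([0.0, 3.0])))    # True
print(visible(disk, np.array([3.0, 0.0]), np.array([-3.0, 0.0])))   # False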

@article{fds322692,
   Author = {Breen, D and Fedkiw, R and Museth, K and Osher, S and Sapiro, G and Whitaker, R},
   Title = {Level set and PDE methods for computer graphics},
   Journal = {ACM SIGGRAPH 2004 Course Notes, SIGGRAPH
             2004},
   Publisher = {ACM Press},
   Year = {2004},
   Month = {August},
   url = {http://dx.doi.org/10.1145/1103900.1103928},
   Abstract = {Level set methods, an important class of partial
             differential equation (PDE) methods, define dynamic surfaces
             implicitly as the level set (iso-surface) of a sampled,
             evolving nD function. The course begins with preparatory
             material that introduces the concept of using partial
             differential equations to solve problems in computer
             graphics, geometric modeling and computer vision. This will
             include the structure and behavior of several different
             types of differential equations, e.g. the level set equation
             and the heat equation, as well as a general approach to
             developing PDE-based applications. The second stage of the
             course will describe the numerical methods and algorithms
             needed to actually implement the mathematics and methods
             presented in the first stage. The course closes with
             detailed presentations on several level set/PDE
             applications, including image/video inpainting, pattern
             formation, image/volume processing, 3D shape reconstruction,
             image/volume segmentation, image/shape morphing, geometric
             modeling, anisotropic diffusion, and natural phenomena
             simulation.},
   Doi = {10.1145/1103900.1103928},
   Key = {fds322692}
}
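
The level set equation the course is built around, phi_t + F |grad phi| = 0, has an equally standard first-order discretization. A minimal sketch for constant speed F > 0 with the Godunov upwind gradient (periodic boundaries via np.roll):

import numpy as np

def level_set_step(phi, F=1.0, dx=1.0, dt=0.25):
    dxm = (phi - np.roll(phi, 1, axis=1)) / dx      # backward difference, x
    dxp = (np.roll(phi, -1, axis=1) - phi) / dx     # forward difference, x
    dym = (phi - np.roll(phi, 1, axis=0)) / dx
    dyp = (np.roll(phi, -1, axis=0) - phi) / dx
    grad = np.sqrt(np.maximum(dxm, 0) ** 2 + np.minimum(dxp, 0) ** 2 +
                   np.maximum(dym, 0) ** 2 + np.minimum(dyp, 0) ** 2)
    return phi - dt * F * grad                      # phi_t = -F |grad phi|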

@article{fds264769,
   Author = {Sapiro, G},
   Title = {Das französische literarische Feld: Struktur, Dynamik und
             Formen der Politisierung [The French literary field:
             structure, dynamics, and forms of politicization]},
   Journal = {Berliner Journal für Soziologie},
   Volume = {14},
   Number = {2},
   Pages = {157-171},
   Publisher = {Springer Science and Business Media LLC},
   Year = {2004},
   Month = {June},
   ISSN = {0863-1808},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000223172300002&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.1007/bf03204702},
   Key = {fds264769}
}

@article{fds264981,
   Author = {Mémoli, F and Sapiro, G and Osher, S},
   Title = {Solving variational problems and partial differential
             equations mapping into general target manifolds},
   Journal = {Journal of Computational Physics},
   Volume = {195},
   Number = {1},
   Pages = {263-292},
   Publisher = {Elsevier BV},
   Year = {2004},
   Month = {March},
   url = {http://dx.doi.org/10.1016/j.jcp.2003.10.007},
   Abstract = {A framework for solving variational problems and partial
             differential equations that define maps onto a given generic
             manifold is introduced in this paper. We discuss the
             framework for arbitrary target manifolds, while the domain
             manifold problem was addressed in [J. Comput. Phys. 174(2)
             (2001) 759]. The key idea is to implicitly represent the
             target manifold as the level-set of a higher dimensional
             function, and then implement the equations in the Cartesian
             coordinate system where this embedding function is defined.
             In the case of variational problems, we restrict the search
             of the minimizing map to the class of maps whose target is
             the level-set of interest. In the case of partial
             differential equations, we re-write all the equation's
             geometric characteristics with respect to the embedding
             function. We then obtain a set of equations that, while
             defined on the whole Euclidean space, are intrinsic to the
             implicitly defined target manifold and map into it. This
             permits the use of classical numerical techniques in
             Cartesian grids, regardless of the geometry of the target
             manifold. The extension to open surfaces and submanifolds is
             addressed in this paper as well. In the latter case, the
             submanifold is defined as the intersection of two higher
             dimensional hypersurfaces, and all the computations are
             restricted to this intersection. Examples of the
             applications of the framework here described include
             harmonic maps in liquid crystals, where the target manifold
             is a hypersphere; probability maps, where the target
             manifold is a hyperplane; chroma enhancement; texture
             mapping; and general geometric mapping between high
             dimensional manifolds. © 2003 Elsevier Inc. All rights
             reserved.},
   Doi = {10.1016/j.jcp.2003.10.007},
   Key = {fds264981}
}
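
For the simplest target, the unit hypersphere psi(p) = |p| - 1, the restriction-to-level-set idea above reduces to something familiar: take an unconstrained gradient (diffusion) step on the map, then project each value back onto {psi = 0}, which for this psi is just normalization. A sketch for harmonic map flow, assuming a (H, W, k) map array and periodic boundaries:

import numpy as np

def laplacian(u):
    return (np.roll(u, 1, 0) + np.roll(u, -1, 0) +
            np.roll(u, 1, 1) + np.roll(u, -1, 1) - 4 * u)

def harmonic_map_step(U, dt=0.2, eps=1e-12):
    """U: (H, W, k) map into R^k with |U| = 1 pointwise."""
    U = U + dt * laplacian(U)                        # unconstrained step
    norms = np.linalg.norm(U, axis=-1, keepdims=True)
    return U / np.maximum(norms, eps)                # project onto sphere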

@article{fds264777,
   Author = {Caselles, V and Sapiro, G and Solé, A and Ballester,
             C},
   Title = {Morse description and morphological encoding of continuous
             data},
   Journal = {Multiscale Modeling and Simulation},
   Volume = {2},
   Number = {2},
   Pages = {179-209},
   Publisher = {Society for Industrial & Applied Mathematics
             (SIAM)},
   Year = {2004},
   Month = {January},
   ISSN = {1540-3459},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000221348900001&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Abstract = {A geometric representation for images is studied in this
             work. This is based on two complementary geometric
             structures for the topographic representation of an image.
             The first one computes a description of the Morse structure,
             while the second one computes a simplified version of
             drainage structures. The topographic significance of the
             Morse and drainage structures of digital elevation maps
             (DEMs) suggests that they can be used as the basis of an
             efficient encoding scheme. As an application we then combine
             this geometric representation with a consistent
             interpolation algorithm and lossless data compression
             schemes to develop an efficient compression algorithm for
             DEMs. This coding scheme controls the L∞ error in the
             decoded elevation map, a property that is necessary for the
             majority of applications dealing with DEMs. We present the
             underlying theory and some compression results for standard
             DEM data.},
   Doi = {10.1137/S1540345902416557},
   Key = {fds264777}
}

@article{fds264972,
   Author = {Mémoli, F and Sapiro, G and Thompson, P},
   Title = {Implicit brain imaging.},
   Journal = {NeuroImage},
   Volume = {23 Suppl 1},
   Pages = {S179-S188},
   Year = {2004},
   Month = {January},
   ISSN = {1053-8119},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/15501087},
   Abstract = {We describe how implicit surface representations can be used
             to solve fundamental problems in brain imaging. This kind of
             representation is not only natural following the
             state-of-the-art segmentation algorithms reported in the
             literature to extract the different brain tissues, but it is
             also, as shown in this paper, the most appropriate one from
             the computational point of view. Examples are provided for
             finding constrained special curves on the cortex, such as
             sulcal beds, regularizing surface-based measures, such as
             cortical thickness, and for computing warping fields between
             surfaces such as the brain cortex. All these result from
             efficiently solving partial differential equations (PDEs)
             and variational problems on surfaces represented in implicit
             form. The implicit framework avoids the need to construct
             intermediate mappings between 3-D anatomical surfaces and
             parametric objects such as planes or spheres, a complex step
             that introduces errors and is required by many other
             cortical processing approaches.},
   Doi = {10.1016/j.neuroimage.2004.07.072},
   Key = {fds264972}
}

@article{fds264794,
   Author = {Solé, A and Caselles, V and Sapiro, G and Arándiga,
             F},
   Title = {Morse description and geometric encoding of digital
             elevation maps},
   Journal = {FREE BOUNDARY PROBLEMS: THEORY AND APPLICATIONS},
   Volume = {147},
   Pages = {303-312},
   Year = {2004},
   ISBN = {3-7643-2193-8},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000189398800024&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264794}
}

@article{fds264961,
   Author = {Pichon, E and Niethammer, M and Sapiro, G},
   Title = {Color histogram equalization through mesh
             deformation},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {2},
   Pages = {117-120},
   Year = {2003},
   Month = {December},
   Abstract = {In this paper we propose an extension of grayscale histogram
             equalization for color images. For aesthetic reasons,
             previously proposed color histogram equalization techniques
             do not generate uniform color histograms. Our method will
             always generate an almost uniform color histogram thus
             making an optimal use of the color space. This is
             particularly interesting for pseudo-color scientific
             visualization. The method is based on deforming a mesh in
             color space to fit the existing histogram and then map it to
             a uniform histogram. It is a natural extension of grayscale
             histogram equalization, and it can be applied to spatial and
             color spaces of any dimension.},
   Key = {fds264961}
}
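
The one-dimensional base case that the mesh-deformation method above generalizes is worth seeing explicitly: rank-mapping the gray values yields an (almost exactly) uniform histogram. The color method achieves the analogous property in higher dimensions; this sketch shows only the 1D target behavior.

import numpy as np

def equalize_gray(img):
    """Map each pixel to its normalized rank: the output histogram is
    (almost) uniform on [0, 1], with ties broken by position."""
    flat = img.ravel()
    ranks = np.argsort(np.argsort(flat, kind="stable"))
    return (ranks / max(flat.size - 1, 1)).reshape(img.shape)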

@article{fds264964,
   Author = {Verdera, J and Caselles, V and Bertalmio, M and Sapiro,
             G},
   Title = {Inpainting surface holes},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {2},
   Pages = {903-906},
   Year = {2003},
   Month = {December},
   Abstract = {An algorithm for filling-in surface holes is introduced in
             this paper. The basic idea is to represent the surface of
             interest in implicit form, and fill-in the holes with a
             system of geometric partial differential equations derived
             from image inpainting algorithms. The framework and examples
             with synthetic and real data are presented.},
   Key = {fds264964}
}

@article{fds264966,
   Author = {Solé, A and Caselles, V and Sapiro, G and Arándiga,
             F},
   Title = {Morse description and geometric encoding of DEM
             data},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {2},
   Pages = {235-238},
   Year = {2003},
   Month = {December},
   Abstract = {Two complementary geometric structures for the topographic
             representation of an image are developed in this work. The
             first one computes a description of the Morse structure of
             the image, while the second one computes a simplified
             version of its drainage structure. The topographic
             significance of the Morse and drainage structures of Digital
             Elevation Maps (DEM) suggests that they can be used as the
             basis of an efficient encoding scheme. We combine this
             geometric representation with an interpolation algorithm and
             loss-less data compression schemes to develop a compression
             scheme for DEM. This algorithm achieves high compression
             while controlling the maximum error in the decoded
             elevation map, a property that is necessary for the majority
             of applications dealing with DEM.},
   Key = {fds264966}
}

@article{fds264962,
   Author = {Bertalmio, M and Vese, L and Sapiro, G and Osher,
             S},
   Title = {Image filling-in in a decomposition space},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {1},
   Pages = {853-855},
   Year = {2003},
   Month = {December},
   Abstract = {An algorithm for the simultaneous filling-in of texture and
             structure in regions of missing image information is
             presented in this paper. The basic idea is to first
             decompose the image into the sum of two functions with
             different basic characteristics, and then reconstruct each
             one of these functions separately with structure and texture
             filling-in algorithms. The first function used in the
             decomposition is of bounded variation, representing the
             underlying image structure, while the second function
             captures the texture and possible noise. The region of
             missing information in the bounded variation image is
             reconstructed using image inpainting algorithms, while the
             same region in the texture image is filled-in with texture
             synthesis techniques. The original image is then
             reconstructed adding back these two sub-images. The novel
             contribution of this paper is then in the combination of
             these three previously developed components, image
             decomposition with inpainting and texture synthesis, which
             permits the simultaneous use of filling-in algorithms that
             are suited for different image characteristics. The novelty
             in the approach is to perform filling-in in a domain
             different from the original given image space. Examples on
             real images show the advantages of this proposed
             approach.},
   Key = {fds264962}
}

@article{fds264963,
   Author = {Patwardhan, KA and Sapiro, G},
   Title = {Projection based image and video inpainting using
             wavelets},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {1},
   Pages = {857-860},
   Year = {2003},
   Month = {December},
   Abstract = {In this paper, we present a technique for automatic color
             image inpainting, the art of modifying an image-region in a
             non-detectable form. The main algorithm is based on the
             theory of projections onto convex sets (POCS). The image and
             its wavelet transform are projected onto each other after
             applying suitable constraints in each domain. This technique
             exploits the frequency-spatial representation provided by
             wavelets and utilizes the correlation between the damaged
             area in the image and its neighborhood. The resulting
             restored area is homogeneous with its surrounding and
             preserves the aesthetics of the image. The same technique is
             used for simple video restoration problems. Video frames are
             stacked and treated as a 3-D volume, making a natural use of
             inter-frame correlation.},
   Key = {fds264963}
}
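
The projection structure described above can be sketched generically: alternate between a wavelet-domain constraint and restoring the known pixels in the image domain. The toy below uses a one-level orthonormal 2D Haar transform and detail shrinkage as the wavelet-domain projection (the paper's constraint sets are more elaborate); even image dimensions are assumed.

import numpy as np

def haar2(a):
    s00, s01 = a[0::2, 0::2], a[0::2, 1::2]
    s10, s11 = a[1::2, 0::2], a[1::2, 1::2]
    return ((s00 + s01 + s10 + s11) / 2, (s00 + s01 - s10 - s11) / 2,
            (s00 - s01 + s10 - s11) / 2, (s00 - s01 - s10 + s11) / 2)

def ihaar2(ll, lh, hl, hh):
    a = np.empty((2 * ll.shape[0], 2 * ll.shape[1]))
    a[0::2, 0::2] = (ll + lh + hl + hh) / 2
    a[0::2, 1::2] = (ll + lh - hl - hh) / 2
    a[1::2, 0::2] = (ll - lh + hl - hh) / 2
    a[1::2, 1::2] = (ll - lh - hl + hh) / 2
    return a

def pocs_inpaint(img, known, iters=100, shrink=0.9):
    """known: boolean mask of valid pixels."""
    x = np.where(known, img.astype(float), img[known].mean())
    for _ in range(iters):
        ll, lh, hl, hh = haar2(x)
        x = ihaar2(ll, shrink * lh, shrink * hl, shrink * hh)  # wavelet proj.
        x = np.where(known, img, x)                            # data proj.
    return x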

@article{fds264763,
   Author = {Sapiro, G},
   Title = {Forms of politicization in the French literary
             field},
   Journal = {Theory and Society},
   Volume = {32},
   Number = {5/6},
   Pages = {633-652},
   Publisher = {Springer Science and Business Media LLC},
   Year = {2003},
   Month = {December},
   ISSN = {0304-2421},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000186731100006&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.1023/b:ryso.0000004920.14641.1b},
   Key = {fds264763}
}

@article{fds264967,
   Author = {Sapiro, G},
   Title = {Introduction to Partial Differential Equations and
             Variational Formulations in Image Processing},
   Journal = {Handbook of Numerical Analysis},
   Volume = {11},
   Pages = {383-461},
   Publisher = {Elsevier},
   Year = {2003},
   Month = {December},
   ISSN = {1570-8659},
   url = {http://dx.doi.org/10.1016/S1570-8659(02)11006-4},
   Doi = {10.1016/S1570-8659(02)11006-4},
   Key = {fds264967}
}

@article{fds264811,
   Author = {Sapiro, G},
   Title = {The literary field between the state and the
             market},
   Journal = {Poetics},
   Volume = {31},
   Number = {5-6},
   Pages = {441-464},
   Publisher = {Elsevier BV},
   Year = {2003},
   Month = {October},
   ISSN = {0304-422X},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000186903600008&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.1016/j.poetic.2003.09.001},
   Key = {fds264811}
}

@article{fds264973,
   Author = {Gorla, G and Interrante, V and Sapiro, G},
   Title = {Texture Synthesis for 3D Shape Representation},
   Journal = {IEEE Transactions on Visualization and Computer
             Graphics},
   Volume = {9},
   Number = {4},
   Pages = {512-524},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2003},
   Month = {October},
   url = {http://dx.doi.org/10.1109/TVCG.2003.1260745},
   Abstract = {Considerable evidence suggests that a viewer's perception of
             the 3D shape of a polygonally-defined object can be
             significantly affected (either masked or enhanced) by the
             presence of a surface texture pattern. However,
             investigations into the specific mechanisms of texture's
             effect on shape perception are still ongoing and the
             question of how to design and apply a texture pattern to a
             surface in order to best facilitate shape perception remains
             open. Recently, we have suggested that, for anisotropic
             texture patterns, the accuracy of shape judgments may be
             significantly affected by the orientation of the surface
             texture pattern anisotropy with respect to the principal
             directions of curvature over the surface. However, it has
             been difficult, until this time, to conduct controlled
             studies specifically investigating the effect of texture
             orientation on shape perception because there has been no
             simple and reliable method for texturing an arbitrary doubly
             curved surface with a specified input pattern such that the
             dominant orientation of the pattern everywhere follows a
             predefined directional vector field over the surface, while
             seams and projective distortion of the pattern are avoided.
             In this paper, we present a straightforward and highly
             efficient method for achieving such a texture and describe
             how it can potentially be used to enhance shape
             representation. Specifically, we describe a novel,
             efficient, automatic algorithm for seamlessly synthesizing,
             from a sample 2D pattern, a high resolution fitted surface
             texture in which the dominant orientation of the pattern
             locally follows a specified vector field over the surface at
             a per-pixel level and in which seams, projective distortion,
             and repetition artifacts in the texture pattern are nearly
             completely avoided. We demonstrate the robustness of our
             method with a variety of texture swatches applied to
             standard graphics data sets and we explain how our method
             can be used to facilitate research in the perception of
             shape from texture.},
   Doi = {10.1109/TVCG.2003.1260745},
   Key = {fds264973}
}
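
A 2D toy of the orientation control at the heart of the method above: sample the source swatch through a per-pixel rotation that aligns the swatch's dominant direction with the prescribed angle field, wrapping coordinates torically. The actual algorithm synthesizes over curved surfaces and suppresses seams and repetition; this sketch ignores both and only illustrates orientation control.

import numpy as np

def oriented_texture(swatch, angles):
    """swatch: (h, w) pattern; angles: (H, W) desired orientation (radians)."""
    H, W = angles.shape
    h, w = swatch.shape
    ys, xs = np.mgrid[0:H, 0:W].astype(float)
    c, s = np.cos(angles), np.sin(angles)
    u = (c * xs + s * ys).astype(int) % w     # rotated sampling coordinates,
    v = (-s * xs + c * ys).astype(int) % h    # wrapped into the swatch
    return swatch[v, u]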

@article{fds264959,
   Author = {Hernandez, M and Barrena, R and Hernandez, G and Sapiro, G and Frangi,
             AF},
   Title = {Pre-clinical evaluation of Implicit Deformable Models for
             three-dimensional segmentation of brain aneurysms in
             CTA},
   Journal = {Proceedings of SPIE - The International Society for Optical
             Engineering},
   Volume = {5032 II},
   Pages = {1264-1274},
   Publisher = {SPIE},
   Year = {2003},
   Month = {September},
   url = {http://dx.doi.org/10.1117/12.483596},
   Abstract = {Knowledge of brain aneurysm dimensions is essential during
             the planning stage of minimally invasive surgical
             interventions using Guglielmi Detachable Coils (GDC). These
             parameters are obtained in clinical routine using 2D Maximum
             Intensity Projection images from Computed Tomographic
             Angiography (CTA). Automated quantification of the three
             dimensional structure of aneurysms directly from the 3D data
             set may be used to provide accurate and objective
             measurements of the clinically relevant parameters. The
             properties of Implicit Deformable Models make them suitable
             for accurately extracting the three dimensional structure of the
             aneurysm and its connected vessels. We have devised a
             two-stage segmentation algorithm for this purpose. In the
             first stage, a rough segmentation is obtained by means of
             the Fast Marching Method combining a speed function based on
             vessel enhancement filtering and a freezing algorithm. In
             the second stage, this rough segmentation provides the
             initialization for Geodesic Active Contours driven by
             region-based information. The latter problem is solved using
             the Level Set algorithm. This work presents a comparative
             study between a clinical and a computerized protocol to
             derive three geometrical descriptors of aneurysm morphology
             that are standard in assessing the viability of surgical
             treatment with GDCs. The study was performed on a database
             of 40 brain aneurysms. The manual measurements were made by
             two neuroradiologists in two independent sessions. Both
             inter- and intra-observer variability and comparison with
             the automated method are presented. According to these
             results, Implicit Deformable Models are a suitable technique
             for this application.},
   Doi = {10.1117/12.483596},
   Key = {fds264959}
}
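
The first stage above (Fast Marching with freezing) can be approximated on the grid graph by a Dijkstra sweep: arrival time grows from the seeds at a rate set by the speed map, and points whose time exceeds a threshold are frozen, confining the front to the vessel and aneurysm. A sketch on a 4-neighbor grid, not the true Eikonal update:

import heapq
import numpy as np

def propagate(speed, seeds, t_freeze=np.inf):
    """speed: (H, W) positive speeds; seeds: list of (i, j) seed pixels.
    Returns arrival times; the rough segmentation is {T <= t_freeze}."""
    H, W = speed.shape
    T = np.full((H, W), np.inf)
    heap = []
    for s in seeds:
        T[s] = 0.0
        heapq.heappush(heap, (0.0, s))
    while heap:
        t, (i, j) = heapq.heappop(heap)
        if t > T[i, j] or t > t_freeze:       # stale entry, or frozen point
            continue
        for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            ni, nj = i + di, j + dj
            if 0 <= ni < H and 0 <= nj < W:
                nt = t + 1.0 / speed[ni, nj]  # time to cross one cell
                if nt < T[ni, nj]:
                    T[ni, nj] = nt
                    heapq.heappush(heap, (nt, (ni, nj)))
    return T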

@article{fds264968,
   Author = {Fedkiw, RP and Sapiro, G and Shu, CW},
   Title = {Shock capturing, level sets, and PDE based methods in
             computer vision and image processing: A review of Osher's
             contributions},
   Journal = {Journal of Computational Physics},
   Volume = {185},
   Number = {2},
   Pages = {309-341},
   Publisher = {Elsevier BV},
   Year = {2003},
   Month = {March},
   url = {http://dx.doi.org/10.1016/S0021-9991(02)00016-5},
   Abstract = {In this paper we review the algorithm development and
             applications in high resolution shock capturing methods,
             level set methods, and PDE based methods in computer vision
             and image processing. The emphasis is on Stanley Osher's
             contribution in these areas and the impact of his work. We
             will start with shock capturing methods and will review the
             Engquist-Osher scheme, TVD schemes, entropy conditions, ENO
             and WENO schemes, and numerical schemes for Hamilton-Jacobi
             type equations. Among level set methods we will review level
             set calculus, numerical techniques, fluids and materials,
             variational approach, high codimension motion, geometric
             optics, and the computation of discontinuous solutions to
             Hamilton-Jacobi equations. Among computer vision and image
             processing we will review the total variation model for
             image denoising, images on implicit surfaces, and the level
             set method in image processing and computer vision. © 2003
             Elsevier Science B.V. All rights reserved.},
   Doi = {10.1016/S0021-9991(02)00016-5},
   Key = {fds264968}
}
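
Of the schemes reviewed above, the Engquist-Osher flux is compact enough to state inline. For Burgers' equation u_t + (u^2/2)_x = 0, with f(u) = u^2/2 convex and minimized at 0, the EO numerical flux is f(max(a, 0)) + f(min(b, 0)). A sketch with periodic boundaries:

import numpy as np

def burgers_eo_step(u, dt, dx):
    f = lambda v: 0.5 * v ** 2
    a, b = u, np.roll(u, -1)                  # states left/right of i + 1/2
    flux = f(np.maximum(a, 0.0)) + f(np.minimum(b, 0.0))
    return u - dt / dx * (flux - np.roll(flux, 1))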

@article{fds264958,
   Author = {Sapiro, GR},
   Title = {Guest editorial: Introduction to the special issue on
             imaging science},
   Journal = {Journal of Mathematical Imaging and Vision},
   Volume = {18},
   Number = {1},
   Pages = {5},
   Year = {2003},
   Month = {January},
   url = {http://dx.doi.org/10.1023/A:1021887609535},
   Doi = {10.1023/A:1021887609535},
   Key = {fds264958}
}

@article{fds264965,
   Author = {Hernandez, M and Frangi, AF and Sapiro, G},
   Title = {Three-dimensional segmentation of brain aneurysms in CTA
             using non-parametric region-based information and implicit
             deformable models: Method and evaluation},
   Journal = {Lecture Notes in Computer Science},
   Volume = {2879},
   Number = {PART 2},
   Pages = {594-602},
   Year = {2003},
   Month = {January},
   ISSN = {0302-9743},
   url = {http://dx.doi.org/10.1007/978-3-540-39903-2_73},
   Abstract = {Knowledge of brain aneurysm dimensions is essential in
             minimally invasive surgical interventions using Guglielmi
             Detachable Coils. These parameters are obtained in clinical
             routine using 2D maximum intensity projection images.
             Automated quantification of the three dimensional structure
             of aneurysms directly from the 3D data set may be used to
             provide accurate and objective measurements of the
             clinically relevant parameters. In this paper we present an
             algorithm devised for the segmentation of brain aneurysms
             based on implicit deformable models combined with
             non-parametric region-based information. This work also
             presents the evaluation of the method on a clinical
             database of 39 cases.},
   Doi = {10.1007/978-3-540-39903-2_73},
   Key = {fds264965}
}

@article{fds264969,
   Author = {Pardo, A and Sapiro, G},
   Title = {Visualization of high dynamic range images.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {12},
   Number = {6},
   Pages = {639-647},
   Year = {2003},
   Month = {January},
   ISSN = {1057-7149},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18237938},
   Abstract = {A novel paradigm for information visualization in high
             dynamic range images is presented in this paper. These
             images, real or synthetic, have luminance with typical
             ranges many orders of magnitude higher than that of standard
             output/viewing devices, thereby requiring some processing
             for their visualization. In contrast with existing
             approaches, which compute a single image with reduced range,
             close in a given sense to the original data, we propose to
             look for a representative set of images. The goal is then to
             produce a minimal set of images capturing the information
             all over the high dynamic range data, while at the same time
             preserving a natural appearance for each one of the images
             in the set. A specific algorithm that achieves this goal is
             presented and tested on natural and synthetic
             data.},
   Doi = {10.1109/tip.2003.812373},
   Key = {fds264969}
}
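
The "representative set of images" idea above contrasts with single-image tone mapping. A fixed exposure bracket already illustrates the target: each image clamps a different slice of the luminance range to the display, and together they cover it. The paper chooses the set optimally; the geometric bracket and display gamma below are assumptions.

import numpy as np

def exposure_set(luminance, n_images=4):
    lo = float(luminance.min()) + 1e-12
    hi = float(luminance.max())
    keys = np.geomspace(lo, hi, n_images)     # anchor luminances
    return [np.clip(luminance / k, 0.0, 1.0) ** (1 / 2.2)  # expose, gamma
            for k in keys]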

@article{fds264970,
   Author = {Rane, SD and Sapiro, G and Bertalmio, M},
   Title = {Structure and texture filling-in of missing image blocks in
             wireless transmission and compression applications.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {12},
   Number = {3},
   Pages = {296-303},
   Year = {2003},
   Month = {January},
   ISSN = {1057-7149},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18237909},
   Abstract = {An approach for filling-in blocks of missing data in
             wireless image transmission is presented. When compression
             algorithms such as JPEG are used as part of the wireless
             transmission process, images are first tiled into blocks of
             8 x 8 pixels. When such images are transmitted over fading
             channels, the effects of noise can destroy entire blocks of
             the image. Instead of using common retransmission query
             protocols, we aim to reconstruct the lost data using
             correlation between the lost block and its neighbors. If the
             lost block contained structure, it is reconstructed using an
             image inpainting algorithm, while texture synthesis is used
             for the textured blocks. The switch between the two schemes
             is done in a fully automatic fashion based on the
             surrounding available blocks. The performance of this method
             is tested for various images and combinations of lost
             blocks. The viability of this method for image compression,
             in association with lossy JPEG, is also discussed.},
   Doi = {10.1109/tip.2002.804264},
   Key = {fds264970}
}
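
The fully automatic switch described above needs only a local texturedness test on the surrounding available blocks. The sketch below uses high-frequency energy as that test and crude stand-ins for the two filling-in routines (a copied random patch for texture synthesis, a flat fill for inpainting); the threshold and names are assumptions.

import numpy as np

rng = np.random.default_rng(0)

def is_textured(nb, thresh=0.25):
    g = nb.astype(float)
    hf = g - (np.roll(g, 1, 0) + np.roll(g, -1, 0) +
              np.roll(g, 1, 1) + np.roll(g, -1, 1)) / 4.0
    return hf.std() / (g.std() + 1e-12) > thresh

def fill_block(nb, size=8):
    """nb: surrounding available pixels (at least size x size)."""
    if is_textured(nb):                       # texture: copy a random patch
        i = rng.integers(0, nb.shape[0] - size + 1)
        j = rng.integers(0, nb.shape[1] - size + 1)
        return nb[i:i + size, j:j + size].astype(float)
    return np.full((size, size), nb.mean())   # structure: smooth (flat) fill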

@article{fds264971,
   Author = {Bertalmio, M and Vese, L and Sapiro, G and Osher,
             S},
   Title = {Simultaneous structure and texture image
             inpainting.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {12},
   Number = {8},
   Pages = {882-889},
   Year = {2003},
   Month = {January},
   ISSN = {1057-7149},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18237962},
   Abstract = {An algorithm for the simultaneous filling-in of texture and
             structure in regions of missing image information is
             presented in this paper. The basic idea is to first
             decompose the image into the sum of two functions with
             different basic characteristics, and then reconstruct each
             one of these functions separately with structure and texture
             filling-in algorithms. The first function used in the
             decomposition is of bounded variation, representing the
             underlying image structure, while the second function
             captures the texture and possible noise. The region of
             missing information in the bounded variation image is
             reconstructed using image inpainting algorithms, while the
             same region in the texture image is filled-in with texture
             synthesis techniques. The original image is then
             reconstructed adding back these two sub-images. The novel
             contribution of this paper is then in the combination of
             these three previously developed components, image
             decomposition with inpainting and texture synthesis, which
             permits the simultaneous use of filling-in algorithms that
             are suited for different image characteristics. Examples on
             real images show the advantages of this proposed
             approach.},
   Doi = {10.1109/tip.2003.815261},
   Key = {fds264971}
}

@article{fds264779,
   Author = {Pichon, E and Sapiro, G and Tannenbaum, A},
   Title = {Segmentation of diffusion tensor imagery},
   Journal = {DIRECTIONS IN MATHEMATICAL SYSTEMS THEORY AND
             OPTIMIZATION},
   Volume = {286},
   Pages = {239-247},
   Year = {2003},
   ISSN = {0170-8643},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000181444900018&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264779}
}

@article{fds264960,
   Author = {Bertalmio, M and Vese, L and Sapiro, G and Osher,
             S},
   Title = {Simultaneous structure and texture image
             inpainting},
   Journal = {Proceedings of the IEEE Computer Society Conference on
             Computer Vision and Pattern Recognition},
   Volume = {2},
   Pages = {II/707-II/712},
   Year = {2003},
   Abstract = {An algorithm for the simultaneous filling-in of texture and
             structure in regions of missing image information is
             presented in this paper. The basic idea is to first
             decompose the image into the sum of two functions with
             different basic characteristics, and then reconstruct each
             one of these functions separately with structure and texture
             filling-in algorithms. The first function used in the
             decomposition is of bounded variation, representing the
             underlying image structure, while the second function
             captures the texture and possible noise. The region of
             missing information in the bounded variation image is
             reconstructed using image inpainting algorithms, while the
             same region in the texture image is filled-in with texture
             synthesis techniques. The original image is then
             reconstructed adding back these two sub-images. The novel
             contribution of this paper is then in the combination of
             these three previously developed components, image
             decomposition with inpainting and texture synthesis, which
             permits the simultaneous use of filling-in algorithms that
             are suited for different image characteristics. Examples on
             real images show the advantages of this proposed
             approach.},
   Key = {fds264960}
}

@article{fds264953,
   Author = {Interrante, V and Gorla, G and Kim, S and Hagh-Shenas, H and Sapiro,
             G},
   Title = {Texture synthesis for 3D shape representation},
   Journal = {Journal of Vision},
   Volume = {2},
   Number = {7},
   Pages = {305-305},
   Publisher = {Association for Research in Vision and Ophthalmology
             (ARVO)},
   Year = {2002},
   Month = {December},
   url = {http://dx.doi.org/10.1167/2.7.305},
   Abstract = {If we could design the perfect texture pattern to apply to
             any smooth surface in order to enable observers to more
             accurately perceive the surface's shape in a static
             monocular image taken from an arbitrary generic viewpoint
             under standard lighting conditions, what would the
             characteristics of that texture pattern be? In order to gain
             insight into this question, our group has developed an
             efficient algorithm for synthesizing a high resolution
             texture pattern (derived from a provided 2D image, e.g. from
             the Brodatz album) over an arbitrary doubly curved surface
             in such a way that both seams and projective distortion are
             practically eliminated, and, most importantly, the
             orientation of the texture pattern is constrained to follow
             an underlying vector field over the surface at a per-pixel
             level. We are using this algorithm to generate stimuli for a
             series of experiments investigating the effects of various
             texture characteristics, including orientation, on surface
             shape judgments. The results of earlier studies that we
             conducted using a more restricted class of uni-directional
             texture patterns seemed to support the hypothesis that shape
             perception is most severely impeded when the texture pattern
             consists of lines that turn in the surface, and that shape
             perception is not significantly different in the case of a
             texture pattern consisting of lines that are locally aligned
             with the first principal direction than in the case of an
             isotropic texture pattern of similar spatial frequency. Our
             new texture synthesis method enables us to extend these
             studies to a much broader class of textures, including
             patterns that contain 90-degree rotational symmetry, which
             is useful in enabling us to maintain continuity in a
             principal-direction oriented pattern as it passes through
             umbilic points where the first and second principal
             directions switch places. Images are available at
             www.cs.umn.edu/~interran/texture. Upon publication, our
             software will be made available via the web.},
   Doi = {10.1167/2.7.305},
   Key = {fds264953}
}

@article{fds264762,
   Author = {Sapiro, G},
   Title = {The structure of the French literary field during the German
             Occupation (1940–1944)},
   Journal = {Poetics},
   Volume = {30},
   Number = {5-6},
   Pages = {387-402},
   Publisher = {Elsevier BV},
   Year = {2002},
   Month = {October},
   ISSN = {0304-422X},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000180416100007&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.1016/s0304-422x(02)00032-3},
   Key = {fds264762}
}

@article{fds264785,
   Author = {Sapiro, G and Lebovics, H},
   Title = {Mona Lisa's Escort. Andre Malraux and the Reinvention of
             French Culture},
   Journal = {Le Mouvement social},
   Number = {201},
   Pages = {104-104},
   Publisher = {JSTOR},
   Year = {2002},
   Month = {October},
   ISSN = {0027-2671},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000180706300015&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.2307/3779882},
   Key = {fds264785}
}

@article{fds264952,
   Author = {Rane, SD and Remus, J and Sapiro, G},
   Title = {Wavelet-domain reconstruction of lost blocks in wireless
             image transmission and packet-switched networks},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {1},
   Pages = {I/309-I/312},
   Year = {2002},
   Month = {January},
   Abstract = {A fast scheme for wavelet-domain interpolation of lost image
             blocks in wireless image transmission is presented in this
             paper. In the transmission of block-coded images, fading in
             wireless channels and congestion in packet-switched networks
             can cause entire blocks to be lost. Instead of using
             retransmission query protocols, we reconstruct the lost
             block in the wavelet-domain using the correlation between
             the lost block and its neighbors. The algorithm first uses
             simple thresholding to determine the presence or absence of
             edges in the lost block. This is followed by an
             interpolation scheme, designed to minimize the blockiness
             effect, while preserving the edges or texture in the
             interior of the blocks. The interpolation scheme minimizes
             the square of the error between the border coefficients of
             the lost block and those of its neighbors, at each transform
             scale. The performance of the algorithm on standard test
             images, its low computational overhead at the decoder, and
             its performance vis-a-vis other reconstruction schemes are
             discussed.},
   Key = {fds264952}
}

@article{fds264954,
   Author = {Rane, SD and Sapiro, G and Bertalmio, M},
   Title = {Structure and texture filling-in of missing image blocks in
             wireless transmission and compression},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {1},
   Pages = {I/317-I/320},
   Year = {2002},
   Month = {January},
   Abstract = {An approach for filling-in blocks of missing data in
             wireless image transmission is presented in this paper. When
             compression algorithms such as JPEG are used as part of the
             wireless transmission process, images are first tiled into
             blocks of 8 × 8 pixels. When such images are transmitted
             over fading channels, the effects of noise can destroy entire
             blocks of the image. Instead of using common retransmission
             query protocols, we aim to reconstruct the lost data using
             correlation between the lost block and its neighbors. If the
             lost block contained structure, it is reconstructed using an
             image inpainting algorithm, while texture synthesis is used
             for the textured blocks. The switch between the two schemes
             is done in a fully automatic fashion based on the
             surrounding available blocks. The performance of this method
             is tested for various images and combinations of lost
             blocks. The viability of this method for image compression,
             in association with lossy JPEG, is also discussed.},
   Key = {fds264954}
}

@article{fds264955,
   Author = {Pardo, A and Sapiro, G},
   Title = {Visualization of high dynamic range images},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {1},
   Pages = {I/633-I/636},
   Year = {2002},
   Month = {January},
   Abstract = {A novel paradigm for information visualization in high
             dynamic range images is presented in this paper. These
             images, real or synthetic, have luminance with typical
             ranges many orders of magnitude higher than that of standard
             output devices, thereby requiring some processing for their
             visualization. In contrast with existing approaches, which
             compute a single image with reduced range, close in a given
             sense to the original data, we propose to look for a
             representative set of images. The goal is then to produce a
             minimal set of images capturing the information all over the
             high dynamic range data, while at the same time preserving a
             natural appearance for each one of the images in the set. A
             specific algorithm that achieves this goal is presented and
             tested on natural and synthetic data.},
   Key = {fds264955}
}

@article{fds264956,
   Author = {Faugeras, O and Perona, P and Sapiro, G},
   Title = {Special issue on partial differential equations in image
             processing, computer vision, and computer
             graphics},
   Journal = {Journal of Visual Communication and Image
             Representation},
   Volume = {13},
   Number = {1-2},
   Pages = {1-2},
   Publisher = {Elsevier BV},
   Year = {2002},
   Month = {January},
   ISSN = {1047-3203},
   url = {http://dx.doi.org/10.1006/jvci.2002.0505},
   Doi = {10.1006/jvci.2002.0505},
   Key = {fds264956}
}

@article{fds264951,
   Author = {Bertalmío, M and Cheng, LT and Osher, S and Sapiro,
             G},
   Title = {Variational problems and partial differential equations on
             implicit surfaces},
   Journal = {Journal of Computational Physics},
   Volume = {174},
   Number = {2},
   Pages = {759-780},
   Publisher = {Elsevier BV},
   Year = {2001},
   Month = {December},
   ISSN = {0021-9991},
   url = {http://dx.doi.org/10.1006/jcph.2001.6937},
   Abstract = {A novel framework for solving variational problems and
             partial differential equations for scalar and vector-valued
             data defined on surfaces is introduced in this paper. The
             key idea is to implicitly represent the surface as the level
             set of a higher dimensional function and to solve the
             surface equations in a fixed Cartesian coordinate system
             using this new embedding function. The equations are then
             both intrinsic to the surface and defined in the embedding
             space. This approach thereby eliminates the need for
             performing complicated and inaccurate computations on
             triangulated surfaces, as is commonly done in the
             literature. We describe the framework and present examples
             in computer graphics and image processing applications,
             including texture synthesis, flow field visualization, and
             image and vector field intrinsic regularization for data
             defined on 3D surfaces. © 2001 Elsevier
             Science.},
   Doi = {10.1006/jcph.2001.6937},
   Key = {fds264951}
}
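
%% The embedding idea above reduces intrinsic PDEs to Cartesian-grid
%% computations. A minimal sketch of one explicit step of intrinsic diffusion
%% of data u near the zero level set of phi (a 2-D slice for brevity; the step
%% size and finite differences are illustrative assumptions):

import numpy as np

def intrinsic_diffusion_step(u, phi, dt=0.1, eps=1e-8):
    gy_phi, gx_phi = np.gradient(phi)
    norm = np.sqrt(gx_phi ** 2 + gy_phi ** 2) + eps
    nx, ny = gx_phi / norm, gy_phi / norm      # unit normals of the level sets
    uy, ux = np.gradient(u)
    dot = ux * nx + uy * ny                    # normal component of grad u
    tx, ty = ux - dot * nx, uy - dot * ny      # tangential (intrinsic) gradient
    div = np.gradient(tx, axis=1) + np.gradient(ty, axis=0)
    return u + dt * div                        # u_t = div(P grad u)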

@article{fds264943,
   Author = {Bertalmío, M and Bertozzi, AL and Sapiro, G},
   Title = {Navier-Stokes, fluid dynamics, and image and video
             inpainting},
   Journal = {Proceedings of the IEEE Computer Society Conference on
             Computer Vision and Pattern Recognition},
   Volume = {1},
   Pages = {I355-I362},
   Publisher = {IEEE Comput. Soc},
   Year = {2001},
   Month = {December},
   url = {http://dx.doi.org/10.1109/cvpr.2001.990497},
   Abstract = {Image inpainting involves filling in part of an image or
             video using information from the surrounding area.
             Applications include the restoration of damaged photographs
             and movies and the removal of selected objects. In this
             paper, we introduce a class of automated methods for digital
             inpainting. The approach uses ideas from classical fluid
             dynamics to propagate isophote lines continuously from the
             exterior into the region to be inpainted. The main idea is
             to think of the image intensity as a 'stream function' for a
             two-dimensional incompressible flow. The Laplacian of the
             image intensity plays the role of the vorticity of the
             fluid; it is transported into the region to be inpainted by
             a vector field defined by the stream function. The resulting
             algorithm is designed to continue isophotes while matching
             gradient vectors at the boundary of the inpainting region.
             The method is directly based on the Navier-Stokes equations
             for fluid dynamics, which has the immediate advantage of
             well-developed theoretical and numerical results. This is a
             new approach for introducing ideas from computational fluid
             dynamics into problems in computer vision and image
             analysis.},
   Doi = {10.1109/cvpr.2001.990497},
   Key = {fds264943}
}
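
%% A minimal sketch of the transport step in the stream-function view: advect
%% the image Laplacian (the "vorticity") along isophotes inside the hole. The
%% explicit scheme, step size, and the Gaussian standing in for viscosity are
%% assumptions made for brevity:

import numpy as np
from scipy.ndimage import gaussian_filter, laplace

def inpaint_step(img, mask, dt=0.05):
    """One update of I_t = grad(Laplacian I) . perp(grad I) inside mask."""
    ly, lx = np.gradient(laplace(img))
    iy, ix = np.gradient(img)
    update = lx * (-iy) + ly * ix   # perp(grad I) = (-I_y, I_x): isophote direction
    out = img + dt * update * mask
    return np.where(mask > 0, gaussian_filter(out, 0.5), img)

# OpenCV ships this method as cv2.inpaint(img, mask, radius, cv2.INPAINT_NS).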

@article{fds264950,
   Author = {Mémoli, F and Sapiro, G},
   Title = {Fast computation of weighted distance functions and
             geodesics on implicit hyper-surfaces},
   Journal = {Journal of Computational Physics},
   Volume = {173},
   Number = {2},
   Pages = {730-764},
   Publisher = {Elsevier BV},
   Year = {2001},
   Month = {November},
   ISSN = {0021-9991},
   url = {http://dx.doi.org/10.1006/jcph.2001.6910},
   Abstract = {An algorithm for the computationally optimal construction of
             intrinsic weighted distance functions on implicit
             hyper-surfaces is introduced in this paper. The basic idea
             is to approximate the intrinsic weighted distance by the
             Euclidean weighted distance computed in a band surrounding
             the implicit hyper-surface in the embedding space, thereby
             performing all the computations in a Cartesian grid with
             classical and efficient numerics. Based on work on geodesics
             on Riemannian manifolds with boundaries, we bound the error
             between the two distance functions. We show that this error
             is of the same order as the theoretical numerical error in
             computationally optimal, Hamilton-Jacobi-based, algorithms
             for computing distance functions in Cartesian grids.
             Therefore, we can use these algorithms, modified to deal
             with spaces with boundaries, and obtain also for the case of
             intrinsic distance functions on implicit hyper-surfaces a
             computationally efficient technique. The approach can be
             extended to solve a more general class of Hamilton-Jacobi
             equations defined on the implicit surface, following the
             same idea of approximating their solutions by the solutions
             in the embedding Euclidean space. The framework here
             introduced thereby allows for the computations to be
             performed on a Cartesian grid with computationally optimal
             algorithms, in spite of the fact that the distance and
             Hamilton-Jacobi equations are intrinsic to the implicit
             hyper-surface. © 2001 Academic Press.},
   Doi = {10.1006/jcph.2001.6910},
   Key = {fds264950}
}
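
%% A minimal sketch of the band idea: approximate intrinsic weighted distances
%% on {phi = 0} by shortest paths through grid nodes in a thin band around it.
%% Dijkstra stands in for the Hamilton-Jacobi fast-marching solver the paper
%% uses (an assumption made for brevity):

import heapq
import numpy as np

def band_distance(phi, weight, source, band_width=2.0):
    """Weighted grid distances from source, restricted to |phi| < band_width."""
    band = np.abs(phi) < band_width
    dist = np.full(phi.shape, np.inf)
    dist[source] = 0.0
    heap = [(0.0, source)]
    while heap:
        d, (i, j) = heapq.heappop(heap)
        if d > dist[i, j]:
            continue
        for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            ni, nj = i + di, j + dj
            if 0 <= ni < phi.shape[0] and 0 <= nj < phi.shape[1] and band[ni, nj]:
                nd = d + 0.5 * (weight[i, j] + weight[ni, nj])
                if nd < dist[ni, nj]:
                    dist[ni, nj] = nd
                    heapq.heappush(heap, (nd, (ni, nj)))
    return dist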

@article{fds264949,
   Author = {Rane, SD and Sapiro, G},
   Title = {Evaluation of JPEG-LS, the new lossless and controlled-lossy
             still image compression standard, for compression of
             high-resolution elevation data},
   Journal = {IEEE Transactions on Geoscience and Remote
             Sensing},
   Volume = {39},
   Number = {10},
   Pages = {2298-2306},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2001},
   Month = {October},
   ISSN = {0196-2892},
   url = {http://dx.doi.org/10.1109/36.957293},
   Abstract = {The compression of elevation data is studied in this paper.
             The performance of JPEG-LS, the new international ISO/ITU
             standard for lossless and near-lossless (controlled-lossy)
             still-image compression, is investigated both for data from
             the USGS digital elevation model (DEM) database and the
             navy-provided digital terrain model (DTM) data. Using
             JPEG-LS has the advantage of working with a standard
             algorithm. Moreover, in contrast with algorithms like the
             popular JPEG-lossy standard, this algorithm permits the
             completely lossless compression of the data as well as a
             controlled lossy mode where a sharp upper bound on the
             elevation error is selected by the user. All these are
             achieved at a very low computational complexity. In addition
             to these algorithmic advantages, we show that JPEG-LS
             achieves significantly better compression results than those
             obtained with other (nonstandard) algorithms previously
             investigated for the compression of elevation data. The
             results here reported suggest that JPEG-LS can immediately
             be adopted for the compression of elevation data for a
             number of applications.},
   Doi = {10.1109/36.957293},
   Key = {fds264949}
}
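
%% The "sharp upper bound on the elevation error" comes from JPEG-LS's
%% near-lossless residual quantization. A minimal sketch of that mechanism
%% (context modeling and entropy coding omitted):

def quantize_residual(e, near):
    """Quantize a prediction residual; reconstruction error is at most near."""
    if e >= 0:
        return (e + near) // (2 * near + 1)
    return -((near - e) // (2 * near + 1))

def dequantize_residual(q, near):
    return q * (2 * near + 1)

# For every integer e:
#   abs(e - dequantize_residual(quantize_residual(e, near), near)) <= near,
# and near = 0 gives exact (lossless) reconstruction.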

@article{fds264946,
   Author = {Bartesaghi, A and Sapiro, G},
   Title = {A system for the generation of curves on 3D brain
             images.},
   Journal = {Human brain mapping},
   Volume = {14},
   Number = {1},
   Pages = {1-15},
   Year = {2001},
   Month = {September},
   ISSN = {1065-9471},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/11500986},
   Abstract = {In this study, a computationally optimal system for the
             generation of curves on triangulated surfaces representing
             3D brains is described. The algorithm is based on optimally
             computing geodesics on the triangulated surfaces following
             Kimmel and Sethian ([1998]: Proc Natl Acad Sci 95:15). The
             system can be used to compute geodesic curves for accurate
             distance measurements as well as to detect sulci and gyri.
             These curves are defined based on local surface curvatures
             that are computed following a novel approach presented in
             this study. The corresponding software is available to the
             research community.},
   Doi = {10.1002/hbm.1037},
   Key = {fds264946}
}

@article{fds264947,
   Author = {Betelu, S and Sapiro, G and Tannenbaum, A and Giblin,
             PJ},
   Title = {On the computation of the affine skeletons of planar curves
             and the detection of skew symmetry},
   Journal = {Pattern Recognition},
   Volume = {34},
   Number = {5},
   Pages = {943-952},
   Publisher = {Elsevier BV},
   Year = {2001},
   Month = {May},
   ISSN = {0031-3203},
   url = {http://dx.doi.org/10.1016/S0031-3203(00)00045-5},
   Abstract = {In this paper we discuss a new approach to compute discrete
             skeletons of planar shapes which is based on affine
             distances, being therefore affine invariant. The method
              works with generic curves that may contain concave sections.
             A dynamical interpretation of the affine skeleton
             construction, based on curve evolution, is discussed as
             well. We propose an efficient implementation of the method
             and give examples. We also demonstrate how to use this
             method to detect affine skew symmetry in real images. ©
             2001 Pattern Recognition Society. Published by Elsevier
             Science Ltd. All rights reserved.},
   Doi = {10.1016/S0031-3203(00)00045-5},
   Key = {fds264947}
}
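
%% A minimal sketch of the area-based affine distance behind the skeleton: the
%% "distance" associated with a chord is the area enclosed between the curve
%% and that chord, computed here with the shoelace formula on a polygonal arc
%% (the full construction searches over chords, which is omitted):

import numpy as np

def chord_area(arc):
    """Area between a polygonal arc (N x 2) and the chord joining its endpoints."""
    closed = np.vstack([arc, arc[0]])
    x, y = closed[:, 0], closed[:, 1]
    return 0.5 * abs(np.dot(x[:-1], y[1:]) - np.dot(x[1:], y[:-1]))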

@article{fds264944,
   Author = {Pardo, A and Sapiro, G},
   Title = {Vector probability diffusion},
   Journal = {IEEE Signal Processing Letters},
   Volume = {8},
   Number = {4},
   Pages = {106-109},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2001},
   Month = {April},
   ISSN = {1070-9908},
   url = {http://dx.doi.org/10.1109/97.911471},
   Abstract = {A method for isotropic and anisotropic diffusion of vector
             probabilities in general, and posterior probabilities in
             particular, is introduced. The technique is based on
             diffusing via coupled partial differential equations
             restricted to the semi-hyperplane corresponding to
             probability functions. Both the partial differential
             equations and their corresponding numerical implementation
             guarantee that the vector remains a probability vector,
             having all its components positive and adding to one.
             Applying the method to posterior probabilities in
             classification problems, spatial and contextual coherence is
             introduced before the maximum a posteriori (MAP) decision,
             thereby improving the classification results.},
   Doi = {10.1109/97.911471},
   Key = {fds264944}
}
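
%% A minimal sketch of why the isotropic case stays on the probability
%% simplex: linear diffusion applied per channel preserves the (constant) unit
%% sum, and the maximum principle keeps every channel nonnegative. Gaussian
%% smoothing stands in here for the coupled PDEs of the paper, whose
%% anisotropic variant needs the restricted formulation:

import numpy as np
from scipy.ndimage import gaussian_filter

def diffuse_probabilities(p, sigma=1.0):
    """p: (K, H, W) with p.sum(axis=0) == 1 everywhere."""
    out = np.stack([gaussian_filter(channel, sigma) for channel in p])
    return out / out.sum(axis=0, keepdims=True)   # guard against numerical drift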

@article{fds264942,
   Author = {Solé, AF and Ngan, SC and Sapiro, G and Hu, X and López,
             A},
   Title = {Anisotropic 2-D and 3-D averaging of fMRI
             signals.},
   Journal = {IEEE transactions on medical imaging},
   Volume = {20},
   Number = {2},
   Pages = {86-93},
   Year = {2001},
   Month = {February},
   ISSN = {0278-0062},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/11321593},
   Abstract = {A novel method for denoising functional magnetic resonance
             imaging temporal signals is presented in this note. The
             method is based on progressively enhancing the temporal
             signal by means of adaptive anisotropic spatial averaging.
             This average is based on a new metric for comparing temporal
             signals corresponding to active fMRI regions. Examples are
             presented both for simulated and real two and
             three-dimensional data. The software implementing the
             proposed technique is publicly available for the research
             community.},
   Doi = {10.1109/42.913175},
   Key = {fds264942}
}

@article{fds264773,
   Author = {Bertalmió, M and Sapiro, G and Cheng, LT and Osher,
             S},
   Title = {Variational problems and PDEs on implicit
             surfaces},
   Journal = {Proceedings - IEEE Workshop on Variational and Level Set
             Methods in Computer Vision, VLSM 2001},
   Pages = {186-193},
   Publisher = {IEEE COMPUTER SOC},
   Year = {2001},
   Month = {January},
   ISBN = {9780769512785},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000170336200024&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Abstract = {A novel framework for solving variational problems and
             partial differential equations for scalar and vector-valued
             data defined on surfaces is introduced. The key idea is to
             implicitly represent the surface as the level set of a
             higher dimensional function, and solve the surface equations
             in a fixed Cartesian coordinate system using this new
             embedding function. The equations are then both intrinsic to
             the surface and defined in the embedding space. This
             approach thereby eliminates the need for performing
             complicated and inaccurate computations on triangulated
             surfaces, as is commonly done in the literature. We describe
             the framework and present examples in computer graphics and
             image processing applications, including texture synthesis,
             flow field visualization, as well as image and vector field
             intrinsic regularization for data defined on 3D
             surfaces.},
   Doi = {10.1109/VLSM.2001.938899},
   Key = {fds264773}
}

@article{fds264939,
   Author = {Betelu, S and Sapiro, G and Tannenbaum, A},
   Title = {Affine invariant erosion of 3D shapes},
   Journal = {Proceedings of the IEEE International Conference on Computer
             Vision},
   Volume = {2},
   Pages = {174-180},
   Publisher = {IEEE Comput. Soc},
   Year = {2001},
   Month = {January},
   url = {http://dx.doi.org/10.1109/iccv.2001.937621},
   Abstract = {A new definition of affine invariant erosion of 3D surfaces
             is introduced. Instead of being based in terms of Euclidean
             distances, the volumes enclosed between the surface and its
             chords are used. The resulting erosion is insensitive to
             noise, and by construction, it is affine invariant. We prove
             some key properties about this erosion operation, and we
             propose a simple method to compute the erosion of implicit
             surfaces. We also discuss how the affine erosion can be used
             to define 3D affine invariant robust skeletons.},
   Doi = {10.1109/iccv.2001.937621},
   Key = {fds264939}
}

@article{fds264940,
   Author = {Ballester, C and Caselles, V and Verdera, J and Bertalmio, M and Sapiro,
             G},
   Title = {A variational model for filling-in gray level and color
             images},
   Journal = {Proceedings of the IEEE International Conference on Computer
             Vision},
   Volume = {1},
   Pages = {10-16},
   Year = {2001},
   Month = {January},
   Abstract = {A variational approach for filling-in regions of missing
             data in gray-level and color images is introduced in this
             paper. The approach is based on joint interpolation of the
             image gray-levels and gradient/isophotes directions,
             smoothly extending in an automatic fashion the isophote
             lines into the holes of missing data. This interpolation is
             computed solving the variational problem via its gradient
             descent flow, which leads to a set of coupled second order
             partial differential equations, one for the gray-levels and
             one for the gradient orientations. The process underlying
             this approach can be considered as an interpretation of the
             Gestaltist's principle of good continuation. No limitations
             are imposed on the topology of the holes, and all regions of
             missing data can be simultaneously processed, even if they
             are surrounded by completely different structures.
             Applications of this technique include the restoration of
             old photographs and removal of superimposed text like dates,
             subtitles, or publicity. Examples of these applications are
             given.},
   Key = {fds264940}
}

@article{fds264941,
   Author = {Haker, S and Sapiro, G and Tannenbaum, A and Washburn,
             D},
   Title = {Missile tracking using knowledge-based adaptive
             thresholding},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {1},
   Pages = {786-789},
   Year = {2001},
   Month = {January},
   Abstract = {In this paper, we apply a knowledge-based segmentation
             method developed for still and video images to the problem
             of tracking missiles and high speed projectiles. Since we
             are only interested in segmenting a portion of the missile
             (namely, the nose cone), we use our segmentation procedure
              as a method of adaptive thresholding. The key idea is to
             utilize a priori knowledge about the objects present in the
             image, e.g. missile and background, introduced via Bayes'
             rule. Posterior probabilities obtained in this way are
             anisotropically smoothed, and the image segmentation is
             obtained via MAP classifications of the smoothed data. When
             segmenting sequences of images, the smoothed posterior
             probabilities of past frames are used as prior distributions
             in succeeding frames.},
   Key = {fds264941}
}
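
%% A minimal sketch of the pipeline the abstract describes: Bayes' rule per
%% pixel, spatial smoothing of the posteriors (isotropic Gaussian here; the
%% paper smooths anisotropically), then the MAP decision. Likelihoods and
%% priors are assumed given:

import numpy as np
from scipy.ndimage import gaussian_filter

def map_segmentation(likelihoods, priors, sigma=2.0):
    """likelihoods, priors: (K, H, W). Returns labels and smoothed posteriors."""
    posterior = likelihoods * priors
    posterior /= posterior.sum(axis=0, keepdims=True)
    smoothed = np.stack([gaussian_filter(c, sigma) for c in posterior])
    return smoothed.argmax(axis=0), smoothed

# The smoothed posteriors of one frame can serve as the priors of the next,
# as in the sequential tracking described above.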

@article{fds264945,
   Author = {Tang, B and Sapiro, G and Caselles, V},
   Title = {Color image enhancement via chromaticity
             diffusion.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {10},
   Number = {5},
   Pages = {701-707},
   Year = {2001},
   Month = {January},
   ISSN = {1057-7149},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18249660},
   Abstract = {A novel approach for color image denoising is proposed in
             this paper. The algorithm is based on separating the color
             data into chromaticity and brightness, and then processing
             each one of these components with partial differential
             equations or diffusion flows. In the proposed algorithm,
             each color pixel is considered as an n-dimensional vector.
             The vectors' direction, a unit vector, gives the
             chromaticity, while the magnitude represents the pixel
             brightness. The chromaticity is processed with a system of
             coupled diffusion equations adapted from the theory of
             harmonic maps in liquid crystals. This theory deals with the
             regularization of vectorial data, while satisfying the
             intrinsic unit norm constraint of directional data such as
             chromaticity. Both isotropic and anisotropic diffusion flows
             are presented for this n-dimensional chromaticity diffusion
             flow. The brightness is processed by a scalar median filter
             or any of the popular and well established anisotropic
             diffusion flows for scalar image enhancement. We present the
             underlying theory, a number of examples, and briefly compare
             with the current literature.},
   Doi = {10.1109/83.918563},
   Key = {fds264945}
}
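
%% A minimal sketch of the chromaticity/brightness split with a projected
%% diffusion step: smooth the unit chromaticity vectors and renormalize (a
%% discrete stand-in for the harmonic-map flow), and median-filter the
%% brightness. Filter choices and parameters are illustrative:

import numpy as np
from scipy.ndimage import gaussian_filter, median_filter

def chromaticity_denoise(img, sigma=1.0, eps=1e-8):
    """img: (H, W, 3) nonnegative color image."""
    brightness = np.linalg.norm(img, axis=2)
    chroma = img / (brightness[..., None] + eps)                   # unit vectors
    chroma = np.stack([gaussian_filter(chroma[..., c], sigma) for c in range(3)],
                      axis=2)
    chroma /= np.linalg.norm(chroma, axis=2, keepdims=True) + eps  # back to the sphere
    return chroma * median_filter(brightness, size=3)[..., None]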

@article{fds264948,
   Author = {Ballester, C and Bertalmio, M and Caselles, V and Sapiro, G and Verdera,
             J},
   Title = {Filling-in by joint interpolation of vector fields and gray
             levels.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {10},
   Number = {8},
   Pages = {1200-1211},
   Year = {2001},
   Month = {January},
   ISSN = {1057-7149},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18255537},
   Abstract = {A variational approach for filling-in regions of missing
             data in digital images is introduced. The approach is based
             on joint interpolation of the image gray levels and
             gradient/isophotes directions, smoothly extending in an
             automatic fashion the isophote lines into the holes of
             missing data. This interpolation is computed by solving the
             variational problem via its gradient descent flow, which
             leads to a set of coupled second order partial differential
             equations, one for the gray-levels and one for the gradient
             orientations. The process underlying this approach can be
             considered as an interpretation of the Gestaltist's
             principle of good continuation. No limitations are imposed
             on the topology of the holes, and all regions of missing
             data can be simultaneously processed, even if they are
             surrounded by completely different structures. Applications
             of this technique include the restoration of old photographs
             and removal of superimposed text like dates, subtitles, or
             publicity. Examples of these applications are given. We
             conclude the paper with a number of theoretical results on
             the proposed variational approach and its corresponding
             gradient descent flow.},
   Doi = {10.1109/83.935036},
   Key = {fds264948}
}

@article{fds264957,
   Author = {Solé, AF and López, A and Sapiro, G},
   Title = {Crease enhancement diffusion},
   Journal = {Computer Vision and Image Understanding},
   Volume = {84},
   Number = {2},
   Pages = {241-248},
   Publisher = {Elsevier BV},
   Year = {2001},
   Month = {January},
   ISSN = {1077-3142},
   url = {http://dx.doi.org/10.1006/cviu.2001.0945},
   Abstract = {Ridge and valley structures are important image features,
             especially in oriented textures. Usually, the extraction of
             these structures requires a prefiltering step to regularize
             the source image. In this paper, we show that classical
             diffusion-based filters are not always appropriate for this
             task and propose a new filtering process. This new filter
             can be interpreted as an example of introducing the
             intrinsic image structure in a diffusion process. © 2001
             Elsevier Science (USA).},
   Doi = {10.1006/cviu.2001.0945},
   Key = {fds264957}
}

@article{fds264801,
   Author = {Sapiro, G},
   Title = {Harmonic map flows and image processing},
   Journal = {FOUNDATIONS OF COMPUTATIONAL MATHEMATICS},
   Volume = {284},
   Pages = {299-322},
   Year = {2001},
   ISBN = {0-521-00349-0},
   ISSN = {0076-0552},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000173560700010&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264801}
}

@article{fds264930,
   Author = {Neoh, HS and Sapiro, G},
   Title = {Using anisotropic diffusion of probability maps for activity
             detection in block-design functional MRI},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {1},
   Pages = {621-624},
   Year = {2000},
   Month = {December},
   Abstract = {A new approach for improving the detection of pixels
             associated with neural activity in functional magnetic
             resonance imaging (fMRI) is presented. We propose to use
             anisotropic diffusion to exploit the spatial correlation
             between the active pixels in functional MRI. Specifically,
             in this paper the anisotropic diffusion flow is applied to a
             probability image, obtained either from t-map statistics or
             via Bayes rule. In general, this information diffusion
             technique can be incorporated into other activity detection
             algorithms before the active/non-active hard decision is
             made. Examples with simulated and real data show
             improvements over classical techniques.},
   Key = {fds264930}
}

@article{fds264934,
   Author = {Chung, DH and Sapiro, G},
   Title = {Segmenting skin lesions with partial differential equations
             based image processing algorithms},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {3},
   Pages = {[d]404-[d]407},
   Year = {2000},
   Month = {December},
   Abstract = {In this paper, a PDE-based system for detecting the boundary
             of skin lesions in digital clinical skin images is
             presented. The image is first-processed via
             contrast-enhancement and anisotropic diffusion. If the
             lesion is covered by hairs, a PDE-based continuous
             morphological filter that removes them is used as an
             additional pre-processing step. Following these steps, the
             skin lesion is segmented either by the geodesic active
             contours model or the geodesic edge tracing approach. These
             techniques are based on computing, again via PDE's, a
             geodesic curve in a space defined by the image content.
             Examples showing the performance of the algorithm are
             given.},
   Key = {fds264934}
}

@article{fds264938,
   Author = {Chung, DH and Sapiro, G},
   Title = {On the level lines and geometry of vector-valued
             images},
   Journal = {IEEE Signal Processing Letters},
   Volume = {7},
   Number = {9},
   Pages = {241-243},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2000},
   Month = {September},
   ISSN = {1070-9908},
   url = {http://dx.doi.org/10.1109/97.863143},
   Abstract = {In this letter, we extend the concept of level lines of
             scalar images to vector-valued data. Consistent with the
             scalar case, we define the level-lines of vector-valued
             images as the integral curves of the directions of minimal
             vectorial change. This direction, and the magnitude of the
             change, are computed using classical Riemannian geometry. As
             an example of the use of this new concept, we show how to
             visualize the basic geometry of vector-valued images with a
             scalar image.},
   Doi = {10.1109/97.863143},
   Key = {fds264938}
}
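
%% A minimal sketch of the construction: per pixel, form the first fundamental
%% form g = J^T J from the channel gradients (Di Zenzo's metric); the
%% eigenvector of g with the smallest eigenvalue is the direction of minimal
%% vectorial change, i.e. the tangent of the vector-valued level line:

import numpy as np

def minimal_change_direction(img):
    """img: (H, W, K). Returns unit tangents (H, W, 2) of the level lines."""
    grads = [np.gradient(img[..., k]) for k in range(img.shape[2])]
    gxx = sum(gx * gx for gy, gx in grads)
    gyy = sum(gy * gy for gy, gx in grads)
    gxy = sum(gx * gy for gy, gx in grads)
    lam_min = 0.5 * (gxx + gyy - np.sqrt((gxx - gyy) ** 2 + 4 * gxy ** 2))
    vx, vy = gxy, lam_min - gxx    # eigenvector (b, lambda - a) of [[a, b], [b, c]]
    norm = np.sqrt(vx ** 2 + vy ** 2)
    flat = norm < 1e-12            # flat or axis-aligned regions: fall back to (1, 0)
    vx = np.where(flat, 1.0, vx / (norm + 1e-12))
    vy = np.where(flat, 0.0, vy / (norm + 1e-12))
    return np.stack([vx, vy], axis=-1)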

@article{fds264933,
   Author = {Bertalmio, M and Sapiro, G and Caselles, V and Ballester,
             C},
   Title = {Image Inpainting},
   Journal = {SIGGRAPH 2000 - Proceedings of the 27th Annual Conference on
             Computer Graphics and Interactive Techniques},
   Pages = {417-424},
   Year = {2000},
   Month = {July},
   url = {http://dx.doi.org/10.1145/344779.344972},
   Abstract = {Inpainting, the technique of modifying an image in an
             undetectable form, is as ancient as art itself. The goals
             and applications of inpainting are numerous, from the
             restoration of damaged paintings and photographs to the
             removal/replacement of selected objects. In this paper, we
             introduce a novel algorithm for digital inpainting of still
             images that attempts to replicate the basic techniques used
             by professional restorators. After the user selects the
             regions to be restored, the algorithm automatically fills-in
             these regions with information surrounding them. The fill-in
             is done in such a way that isophote lines arriving at the
             regions’ boundaries are completed inside. In contrast with
             previous approaches, the technique here introduced does not
             require the user to specify where the novel information
             comes from. This is automatically done (and in a fast way),
             thereby allowing to simultaneously fill-in numerous regions
             containing completely different structures and surrounding
             backgrounds. In addition, no limitations are imposed on the
             topology of the region to be inpainted. Applications of this
             technique include the restoration of old photographs and
             damaged film; removal of superimposed text like dates,
             subtitles, or publicity; and the removal of entire objects
             from the image like microphones or wires in special
             effects.},
   Doi = {10.1145/344779.344972},
   Key = {fds264933}
}

@article{fds264936,
   Author = {Bertalmio, M and Sapiro, G and Randall, G},
   Title = {Morphing active contours},
   Journal = {IEEE Transactions on Pattern Analysis and Machine
             Intelligence},
   Volume = {22},
   Number = {7},
   Pages = {733-737},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2000},
   Month = {July},
   ISSN = {0162-8828},
   url = {http://dx.doi.org/10.1109/34.865191},
   Abstract = {A method for deforming curves in a given image to a desired
             position in a second image is introduced in this paper. The
             algorithm is based on deforming the first image toward the
             second one via a Partial Differential Equation (PDE), while
             tracking the deformation of the curves of interest in the
             first image with an additional, coupled, PDE. The tracking
             is performed by projecting the velocities of the first
             equation into the second one. In contrast with previous
             PDE-based approaches, both the images and the curves on the
             frames/slices of interest are used for tracking. The
             technique can be applied to object tracking and sequential
             segmentation. The topology of the deforming curve can change
             without any special topology handling procedures added to
             the scheme. This permits, for example, the automatic
             tracking of scenes where, due to occlusions, the topology of
             the objects of interest changes from frame to frame. In
             addition, this work introduces the concept of projecting
             velocities to obtain systems of coupled PDEs for image
             analysis applications. We show examples for object tracking
             and segmentation of electronic microscopy.},
   Doi = {10.1109/34.865191},
   Key = {fds264936}
}

@article{fds264937,
   Author = {Chung, DH and Sapiro, G},
   Title = {Segmenting skin lesions with partial-differential-equations-based
             image processing algorithms.},
   Journal = {IEEE transactions on medical imaging},
   Volume = {19},
   Number = {7},
   Pages = {763-767},
   Year = {2000},
   Month = {July},
   ISSN = {0278-0062},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/11055791},
   Abstract = {In this paper, a partial differential equation (PDE)-based
             system for detecting the boundary of skin lesions in digital
             clinical skin images is presented. The image is first
             preprocessed via contrast-enhancement and anisotropic
             diffusion. If the lesion is covered by hairs, a PDE-based
             continuous morphological filter that removes them is used as
             an additional preprocessing step. Following these steps, the
             skin lesion is segmented either by the geodesic active
             contours model or the geodesic edge tracing approach. These
             techniques are based on computing, again via PDEs, a
             geodesic curve in a space defined by the image content.
             Examples showing the performance of the algorithm are
             given.},
   Doi = {10.1109/42.875204},
   Key = {fds264937}
}
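
%% A minimal sketch of the segmentation stage using scikit-image's
%% morphological variant of geodesic active contours; the paper solves the
%% original PDE model, so the library, function names, and parameters below
%% are substitutions, not the authors' implementation:

import numpy as np
from skimage.segmentation import (inverse_gaussian_gradient,
                                  morphological_geodesic_active_contour)

def segment_lesion(gray, n_iter=200):
    gimage = inverse_gaussian_gradient(gray)   # edge-stopping map, small at edges
    init = np.zeros(gray.shape, dtype=np.int8)
    init[10:-10, 10:-10] = 1                   # seed box shrinking onto the lesion
    return morphological_geodesic_active_contour(
        gimage, n_iter, init_level_set=init, smoothing=2, balloon=-1)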

@article{fds264816,
   Author = {Faugeras, O and Nielsen, M and Perona, P and Romeny, BTH and Sapiro,
             G},
   Title = {Special Issue on the Second International Conference on
             Scale Space Theory in Computer Vision},
   Journal = {Journal of Visual Communication and Image
             Representation},
   Volume = {11},
   Number = {2},
   Pages = {95-95},
   Publisher = {Elsevier BV},
   Year = {2000},
   Month = {June},
   ISSN = {1047-3203},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000087184000001&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.1006/jvci.1999.0436},
   Key = {fds264816}
}

@article{fds264935,
   Author = {Haker, S and Angenent, S and Tannenbaum, A and Kikinis, R and Sapiro, G and Halle, M},
   Title = {Conformal surface parameterization for texture
             mapping},
   Journal = {IEEE Transactions on Visualization and Computer
             Graphics},
   Volume = {6},
   Number = {2},
   Pages = {181-189},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2000},
   Month = {April},
   ISSN = {1077-2626},
   url = {http://dx.doi.org/10.1109/2945.856998},
   Abstract = {In this paper, we give an explicit method for mapping any
             simply connected surface onto the sphere in a manner which
             preserves angles. This technique relies on certain conformal
             mappings from differential geometry. Our method provides a
             new way to automatically assign texture coordinates to
             complex undulating surfaces. We demonstrate a finite element
             method that can be used to apply our mapping technique to a
             triangulated geometric description of a surface.},
   Doi = {10.1109/2945.856998},
   Key = {fds264935}
}

@article{fds264831,
   Author = {Betelu, S and Sapiro, G and Tannenbaum, A and Giblin,
             PJ},
   Title = {Noise-resistant affine skeletons of planar
             curves},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {1842},
   Pages = {742-754},
   Publisher = {SPRINGER},
   Editor = {Vernon, D},
   Year = {2000},
   Month = {January},
   ISBN = {3540676856},
   url = {http://www.informatik.uni-trier.de/~ley/db/conf/eccv/eccv2000-1.html},
   Abstract = {A new definition of affine invariant skeletons for shape
              representation is introduced. A point belongs to the affine
              skeleton if and only if it is equidistant from at least two
              points of the curve, with the distance being a minimum and
             given by the areas between the curve and its corresponding
             chords. The skeleton is robust, eliminating the need for
             curve denoising. Previous approaches have used either the
             Euclidean or affine distances, thereby resulting in a much
             less robust computation. We propose a simple method to
             compute the skeleton and give examples with real images, and
             show that the proposed definition works also for noisy data.
             We also demonstrate how to use this method to detect affine
             skew symmetry.},
   Doi = {10.1007/3-540-45054-8_48},
   Key = {fds264831}
}

@article{fds264925,
   Author = {Caselles, V and Sapiro, G and Chung, DH},
   Title = {Vector median filters, inf-sup operations, and coupled
             PDE's: Theoretical connections},
   Journal = {Journal of Mathematical Imaging and Vision},
   Volume = {12},
   Number = {2},
   Pages = {109-119},
   Year = {2000},
   Month = {January},
   url = {http://dx.doi.org/10.1023/A:1008310305351},
   Abstract = {In this paper, we formally connect vector median
              filters, inf-sup morphological operations, and geometric
              partial differential equations. Considering a lexicographic
              order, which permits one to define an order between vectors
              in IR^N, we first show that the vector median filter of a
             vector-valued image is equivalent to a collection of
             infimum-supremum morphological operations. We then proceed
             and study the asymptotic behavior of this filter. We also
             provide an interpretation of the infinitesimal iteration of
             this vectorial median filter in terms of systems of coupled
             geometric partial differential equations. The main component
             of the vector evolves according to curvature motion, while,
             intuitively, the others regularly deform their level-sets
             toward those of this main component. These results extend to
             the vector case classical connections between scalar median
             filters, mathematical morphology, and mean curvature
             motion.},
   Doi = {10.1023/A:1008310305351},
   Key = {fds264925}
}
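
%% A minimal sketch of the filter the theorems concern: within each window the
%% output is the sample vector minimizing the summed distances to all other
%% samples (brute force; the paper's lexicographic machinery is not needed
%% just to apply the filter):

import numpy as np

def vector_median(window):
    """window: (M, K) vector-valued samples; returns the vector median."""
    d = np.linalg.norm(window[:, None, :] - window[None, :, :], axis=2)
    return window[d.sum(axis=1).argmin()]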

@article{fds264926,
   Author = {Tang, B and Sapiro, G and Caselles, V},
   Title = {Diffusion of general data on non-flat manifolds via harmonic
             maps theory: The direction diffusion case},
   Journal = {International Journal of Computer Vision},
   Volume = {36},
   Number = {2},
   Pages = {149-161},
   Year = {2000},
   Month = {January},
   url = {http://dx.doi.org/10.1023/A:1008152115986},
   Abstract = {In a number of disciplines, directional data provides a
             fundamental source of information. A novel framework for
             isotropic and anisotropic diffusion of directions is
             presented in this paper. The framework can be applied both
             to denoise directional data and to obtain multiscale
             representations of it. The basic idea is to apply and extend
             results from the theory of harmonic maps, and in particular,
             harmonic maps in liquid crystals. This theory deals with the
             regularization of vectorial data, while satisfying the
             intrinsic unit norm constraint of directional data. We show
             the corresponding variational and partial differential
             equations formulations for isotropic diffusion, obtained
             from an L2 norm, and edge preserving diffusion, obtained
             from an Lp norm in general and an L1 norm in particular. In
             contrast with previous approaches, the framework is valid
             for directions in any dimensions, supports non-smooth data,
             and gives both isotropic and anisotropic formulations. In
             addition, the framework of harmonic maps here described can
             be used to diffuse and analyze general image data defined on
             general non-flat manifolds, that is, functions between two
             general manifolds. We present a number of theoretical
             results, open questions, and examples for gradient vectors,
             optical flow, and color images.},
   Doi = {10.1023/A:1008152115986},
   Key = {fds264926}
}

@article{fds264928,
   Author = {Chung, DH and Sapiro, G},
   Title = {Segmentation-free skeletonization of gray-scale images via
             PDE's},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {2},
   Pages = {927-930},
   Year = {2000},
   Month = {January},
   Abstract = {A simple approach to compute the skeletons of gray-scale
             images using partial differential equations is presented in
             this paper. The proposed scheme works directly on the
             gray-scale images, without the necessity of pre-segmentation
             (binarization), or the addition of shock capturing schemes.
             This is accomplished by deforming the given image according
             to a family of modified continuous-scale erosion/dilation
             equations. With the scheme here proposed, the skeleton of
             multiple objects can be simultaneously computed. Examples on
             synthetic and real images are provided.},
   Key = {fds264928}
}
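
%% A minimal sketch of one upwind step of the continuous-scale erosion
%% u_t = -|grad u| driving the skeletonization (the paper's modification that
%% halts the flow at skeleton points is omitted):

import numpy as np

def erosion_step(u, dt=0.25):
    up = np.pad(u, 1, mode='edge')
    dxm, dxp = u - up[1:-1, :-2], up[1:-1, 2:] - u   # backward/forward x-differences
    dym, dyp = u - up[:-2, 1:-1], up[2:, 1:-1] - u   # backward/forward y-differences
    grad = np.sqrt(np.maximum(dxm, 0) ** 2 + np.minimum(dxp, 0) ** 2 +
                   np.maximum(dym, 0) ** 2 + np.minimum(dyp, 0) ** 2)
    return u - dt * grad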

@article{fds264929,
   Author = {Haker, S and Sapiro, G and Tannenbaum, A},
   Title = {Knowledge-based segmentation of SAR data with learned
             priors.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {9},
   Number = {2},
   Pages = {299-301},
   Year = {2000},
   Month = {January},
   ISSN = {1057-7149},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18255401},
   Abstract = {An approach for the segmentation of still and video
             synthetic aperture radar (SAR) images is described. A priori
             knowledge about the objects present in the image, e.g.,
             target, shadow and background terrain, is introduced via
             Bayes' rule. Posterior probabilities obtained in this way
             are then anisotropically smoothed, and the image
             segmentation is obtained via MAP classifications of the
             smoothed data. When segmenting sequences of images, the
             smoothed posterior probabilities of past frames are used to
             learn the prior distributions in the succeeding frame. We
             show with examples from public data sets that this method
             provides an efficient and fast technique for addressing the
             segmentation of SAR data.},
   Doi = {10.1109/83.821747},
   Key = {fds264929}
}

@article{fds264932,
   Author = {Tang, B and Sapiro, G and Caselles, V},
   Title = {Chromaticity diffusion},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {2},
   Pages = {784-787},
   Publisher = {IEEE},
   Year = {2000},
   Month = {January},
   url = {http://dx.doi.org/10.1109/icip.2000.899826},
   Abstract = {A novel approach for color image denoising is proposed in
             this paper. The algorithm is based on separating the color
             data into chromaticity and brightness, and then processing
             each one of these components with partial differential
             equations or diffusion flows. In the proposed algorithm,
             each color pixel is considered as an n-dimensional vector.
             The vectors' direction, a unit vector, gives the
             chromaticity, while the magnitude represents the pixel
             brightness. The chromaticity is processed with a system of
             coupled diffusion equations adapted from the theory of
             harmonic maps in liquid crystals. This theory deals with the
             regularization of vectorial data, while satisfying the
             intrinsic unit norm constraint of directional data such as
             chromaticity. Both isotropic and anisotropic diffusion flows
             are presented for this n-dimensional chromaticity diffusion
             flow. The brightness is processed by a scalar median filter
             or any of the popular and well established anisotropic
             diffusion flows for scalar image enhancement. We present the
             underlying theory, a number of examples, and briefly compare
             with the current literature.},
   Doi = {10.1109/icip.2000.899826},
   Key = {fds264932}
}

@article{Weinberger2000,
   Author = {Weinberger, MJ and Seroussi, G and Sapiro, G},
   Title = {The LOCO-I lossless image compression algorithm: principles
             and standardization into JPEG-LS.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {9},
   Number = {8},
   Pages = {1309-1324},
   Year = {2000},
   Month = {January},
   ISSN = {1057-7149},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18262969},
   Abstract = {LOCO-I (LOw COmplexity LOssless COmpression for Images) is
             the algorithm at the core of the new ISO/ITU standard for
             lossless and near-lossless compression of continuous-tone
             images, JPEG-LS. It is conceived as a "low complexity
             projection" of the universal context modeling paradigm,
             matching its modeling unit to a simple coding unit. By
             combining simplicity with the compression potential of
             context models, the algorithm "enjoys the best of both
             worlds." It is based on a simple fixed context model, which
             approaches the capability of the more complex universal
             techniques for capturing high-order dependencies. The model
             is tuned for efficient performance in conjunction with an
             extended family of Golomb-type codes, which are adaptively
             chosen, and an embedded alphabet extension for coding of
             low-entropy image regions. LOCO-I attains compression ratios
             similar or superior to those obtained with state-of-the-art
             schemes based on arithmetic coding. Moreover, it is within a
             few percentage points of the best available compression
             ratios, at a much lower complexity level. We discuss the
             principles underlying the design of LOCO-I, and its
              standardization into JPEG-LS.},
   Doi = {10.1109/83.855427},
   Key = {Weinberger2000}
}
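
%% A minimal sketch of the coding unit: a Golomb-Rice code (the power-of-two
%% Golomb case adopted for low complexity), with the usual mapping of signed
%% residuals to nonnegative integers. Adaptive selection of k and the context
%% modeling are omitted:

def zigzag(e):
    """Map signed residual to nonnegative integer: 0, -1, 1, -2, 2, ..."""
    return 2 * e if e >= 0 else -2 * e - 1

def rice_encode(n, k):
    """Encode nonnegative n: unary quotient, then k raw remainder bits."""
    q, r = n >> k, n & ((1 << k) - 1)
    return '1' * q + '0' + (format(r, '0{}b'.format(k)) if k else '')

# Example: rice_encode(zigzag(-3), k=2) == '1001' (quotient 1, remainder 0b01).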

@article{fds264786,
   Author = {Giblin, PJ and Sapiro, G},
   Title = {Affine versions of the symmetry set},
   Journal = {REAL AND COMPLEX SINGULARITIES},
   Volume = {412},
   Pages = {173-187},
   Year = {2000},
   ISBN = {1-58488-142-9},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000084219300013&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264786}
}

@article{fds264931,
   Author = {Pardo, A and Sapiro, G},
   Title = {Vector probability diffusion},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {1},
   Pages = {884-887},
   Year = {2000},
   Abstract = {A method for isotropic and anisotropic diffusion of vector
             probabilities in general, and posterior probabilities in
             particular, is introduced. The technique is based on
             diffusing via coupled partial differential equations
             restricted to the semi-hyperplane corresponding to
             probability functions. Both the partial differential
             equations and their corresponding numerical implementation
             guarantee that the vector remains a probability vector,
             having all its components positive and adding to one.
             Applying the method to posterior probabilities in
              classification problems, spatial and contextual coherence
             is introduced before the MAP decision, thereby improving the
             classification results.},
   Key = {fds264931}
}

@article{fds264921,
   Author = {Sapiro, G},
   Title = {Color and illuminant voting},
   Journal = {IEEE Transactions on Pattern Analysis and Machine
             Intelligence},
   Volume = {21},
   Number = {11},
   Pages = {1210-1215},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1999},
   Month = {December},
   ISSN = {0162-8828},
   url = {http://dx.doi.org/10.1109/34.809114},
   Abstract = {A geometric-vision approach to color constancy and
              illuminant estimation is presented in this paper. We show a
              general framework, based on ideas from the generalized
              probabilistic Hough transform, to estimate the illuminant
              and reflectance of natural images. Each image pixel votes
              for possible illuminants and the estimation is based on
              cumulative votes. The framework is natural for the
              introduction of physical constraints in the color constancy
              problem. We show the relationship of this work to previous
              algorithms for color constancy and present examples. © 1999
              IEEE.},
   Doi = {10.1109/34.809114},
   Key = {fds264921}
}
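
%% A minimal sketch of the cumulative-voting idea in the generalized-Hough
%% spirit: each pixel votes for the candidate illuminants consistent with it,
%% and the most-voted candidate wins. The chromaticity-proximity test below is
%% an illustrative stand-in for the paper's physical constraints:

import numpy as np

def vote_illuminant(pixels, candidates, tol=0.02):
    """pixels: (N, 3), candidates: (C, 3), both positive RGB."""
    px = pixels / pixels.sum(axis=1, keepdims=True)        # pixel chromaticities
    cx = candidates / candidates.sum(axis=1, keepdims=True)
    d = np.linalg.norm(px[:, None, :] - cx[None, :, :], axis=2)
    votes = (d < tol).sum(axis=0)                          # one vote per consistent pair
    return candidates[votes.argmax()]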

@article{fds264923,
   Author = {Caselles, V and Sapiro, G and Chung, DH},
   Title = {Vector median filters, morphology, and PDE's: Theoretical
             connections},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {4},
   Pages = {177-181},
   Year = {1999},
   Month = {December},
   Abstract = {In this paper, we formally connect vector median
              filters, morphological operators, and geometric partial
              differential equations. Considering a lexicographic order,
              which permits one to define an order between vectors in IR^N, we
             first show that the vector median filter of a vector-valued
             image is equivalent to a collection of infimum-supremum
             morphological operations. We then proceed and study the
             asymptotic behavior of this filter. We also provide an
             interpretation of the infinitesimal iteration of this
             vectorial median filter in terms of systems of coupled
             geometric partial differential equations. The main component
             of the vector evolves according to curvature motion, while,
             intuitively, the others regularly deform their level-sets
             toward those of this main component. These results extend to
             the vector case classical connections between scalar median
             filters, mathematical morphology, and mean curvature
             motions.},
   Key = {fds264923}
}

@article{fds264924,
   Author = {Weinberger, MJ and Seroussi, G and Sapiro, G},
   Title = {From LOCO-I to the JPEG-LS standard},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {4},
   Pages = {68-72},
   Year = {1999},
   Month = {December},
   Abstract = {LOCO-I (LOw COmplexity LOssless COmpression for Images) is
             the algorithm at the core of the new ISO/ITU standard for
             lossless and near-lossless compression of continuous-tone
             images, JPEG-LS. The algorithm was conceived as a `low
             complexity projection' of the universal context modeling
             paradigm, matching its modeling unit to a simple coding unit
             based on Golomb codes. The JPEG-LS standard evolved after
             successive refinements of the core algorithm, and a
             description of its design principles and main algorithmic
             components is presented in this paper. LOCO-I/JPEG-LS
             attains compression ratios similar or superior to those
             obtained with state-of-the-art schemes based on arithmetic
             coding. Moreover, it is within a few percentage points of
             the best available compression ratios, at a much lower
             complexity level.},
   Key = {fds264924}
}

@article{fds264918,
   Author = {Bertalmio, M and Sapiro, G and Randall, G},
   Title = {Region tracking on level-sets methods.},
   Journal = {IEEE transactions on medical imaging},
   Volume = {18},
   Number = {5},
   Pages = {448-451},
   Year = {1999},
   Month = {May},
   ISSN = {0278-0062},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/10416806},
   Abstract = {Since the work by Osher and Sethian on level-sets algorithms
             for numerical shape evolutions, this technique has been used
             for a large number of applications in numerous fields. In
             medical imaging, this numerical technique has been
             successfully used, for example, in segmentation and cortex
             unfolding algorithms. The migration from a Lagrangian
             implementation to a Eulerian one via implicit
             representations or level-sets brought some of the main
             advantages of the technique, i.e., topology independence and
             stability. This migration means also that the evolution is
             parametrization free. Therefore, we do not know exactly how
             each part of the shape is deforming and the point-wise
             correspondence is lost. In this note we present a technique
             to numerically track regions on surfaces that are being
             deformed using the level-sets method. The basic idea is to
             represent the region of interest as the intersection of two
             implicit surfaces and then track its deformation from the
             deformation of these surfaces. This technique then solves
             one of the main shortcomings of the very useful level-sets
             approach. Applications include lesion localization in
             medical images, region tracking in functional MRI (fMRI)
             visualization, and geometric surface mapping.},
   Doi = {10.1109/42.774172},
   Key = {fds264918}
}
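
%% A minimal sketch of the representation trick: encode the region of interest
%% as the part of {phi = 0} lying inside {psi < 0}; after phi and psi evolve,
%% the tracked region is simply re-extracted from the pair:

import numpy as np

def tracked_region(phi, psi, band=1.0):
    """Boolean mask: near the surface {phi = 0} and inside {psi < 0}."""
    return (np.abs(phi) < band) & (psi < 0)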

@article{fds264833,
   Author = {Black, MJ and Sapiro, G},
   Title = {Edges as outliers: Anisotropic smoothing using local image
             statistics},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {1682},
   Pages = {259-270},
   Publisher = {SPRINGER},
   Editor = {Nielsen, M and Johansen, P and Olsen, OF and Weickert,
             J},
   Year = {1999},
   Month = {January},
   ISBN = {9783540664987},
   url = {http://www.informatik.uni-trier.de/~ley/db/conf/scalespace/scalespace1999.html},
   Abstract = {Edges are viewed as statistical outliers with respect to
             local image gradient magnitudes. Within local image regions
             we compute a robust statistical measure of the gradient
             variation and use this in an anisotropic diffusion framework
              to determine a spatially varying "edge-stopping" parameter
              σ. We show how to determine this parameter for two
              edge-stopping functions described in the literature
              (Perona-Malik and the Tukey biweight). Smoothing of the
              image is related to the local texture: in regions of low
              texture, small gradient values may be treated as edges,
              whereas in regions of high texture, large gradient
              magnitudes are necessary before an edge is preserved. Intuitively
             these results have similarities with human perceptual
             phenomena such as masking and popout. Results are shown on a
             variety of standard images.},
   Doi = {10.1007/3-540-48236-9_23},
   Key = {fds264833}
}
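
%% A minimal sketch of the robust scale estimate and one of the two
%% edge-stopping functions discussed above: sigma from the median absolute
%% deviation (MAD) of gradient magnitudes, plugged into the Tukey biweight
%% diffusivity (the paper's exact constant relating the two scales is
%% omitted):

import numpy as np

def robust_sigma(img):
    gy, gx = np.gradient(img.astype(float))
    mag = np.sqrt(gx ** 2 + gy ** 2)
    mad = np.median(np.abs(mag - np.median(mag)))
    return 1.4826 * mad            # consistent scale estimate under Gaussian noise

def tukey_g(x, sigma):
    """Tukey biweight diffusivity: exactly zero (edge preserved) beyond sigma."""
    g = 0.5 * (1 - (x / sigma) ** 2) ** 2
    return np.where(np.abs(x) <= sigma, g, 0.0)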

@article{fds264838,
   Author = {Bertalmio, M and Sapiro, G and Randall, G},
   Title = {Region tracking on surfaces deforming via level-sets
             methods},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {1682},
   Pages = {330-338},
   Publisher = {SPRINGER},
   Editor = {Nielsen, M and Johansen, P and Olsen, OF and Weickert,
             J},
   Year = {1999},
   Month = {January},
   ISBN = {9783540664987},
   url = {http://www.informatik.uni-trier.de/~ley/db/conf/scalespace/scalespace1999.html},
   Abstract = {Since the work by Osher and Sethian on level-sets algorithms
             for numerical shape evolutions, this technique has been used
             for a large number of applications in numerous fields. In
             medical imaging, this numerical technique has been
             successfully used for example in segmentation and cortex
              unfolding algorithms. The migration from a Lagrangian
              implementation to an Eulerian one via implicit representations
             or level-sets brought some of the main advantages of the
             technique, mainly, topology independence and stability. This
             migration means also that the evolution is parametrization
             free, and therefore we do not know exactly how each part of
             the shape is deforming, and the point-wise correspondence is
             lost. In this note we present a technique to numerically
             track regions on sur- faces that are being deformed using
             the level-sets method. The basic idea is to represent the
             region of interest as the intersection of two implicit
             surfaces, and then track its deformation from the
             deformation of these surfaces. This technique then solves
             one of the main shortcomings of the very useful level-sets
             approach. Applications include lesion localization in
             medical images, region tracking in functional MRI
             visualization, and geometric surface mapping.},
   Doi = {10.1007/3-540-48236-9_29},
   Key = {fds264838}
}
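
A minimal 2D sketch of the tracking idea above: the shape is carried by one implicit function and the region of interest is marked by a second one, and both are transported with the same speed. The uniform inflation speed F, the grid, and the 2D (curve rather than surface) setting are assumptions for illustration only.

    import numpy as np

    n = 101
    y, x = np.mgrid[0:n, 0:n].astype(float)
    phi = np.hypot(x - 50, y - 50) - 20.0     # circle of radius 20: the shape
    psi = x - 50.0                            # marks the part of the curve with x < 50

    F, dt = 1.0, 0.5                          # toy speed: uniform outward motion
    for _ in range(20):
        for f in (phi, psi):
            gy, gx = np.gradient(f)
            f -= dt * F * np.hypot(gx, gy)    # level-set transport: f_t + F|grad f| = 0

    # tracked region: points near {phi = 0} where psi < 0
    region = (np.abs(phi) < 1.0) & (psi < 0)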

@article{fds264839,
   Author = {Bertalmio, M and Sapiro, G and Randall, G},
   Title = {Morphing active contours},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {1682},
   Pages = {46-53},
   Publisher = {SPRINGER},
   Editor = {Nielsen, M and Johansen, P and Olsen, OF and Weickert,
             J},
   Year = {1999},
   Month = {January},
   ISBN = {9783540664987},
   url = {http://www.informatik.uni-trier.de/~ley/db/conf/scalespace/scalespace1999.html},
   Abstract = {A method for deforming curves in a given image to a desired
             position in a second image is introduced in this paper. The
             algorithm is based on deforming the first image toward the
             second one via a partial differential equation, while
             tracking the deformation of the curves of interest in the
             first image with an additional, coupled, partial
             differential equation. The tracking is performed by
             projecting the velocities of the first equation into the
             second one. In contrast with previous PDE based approaches,
             both the images and the curves on the frames/slices of
             interest are used for tracking. The technique can be applied
             to object tracking and sequential segmentation. The topology
             of the deforming curve can change, without any special
             topology handling procedures added to the scheme. This
             permits, for example, the automatic tracking of scenes where,
             due to occlusions, the topology of the objects of interest
             changes from frame to frame. In addition, this work
             introduces the concept of projecting velocities to obtain
             systems of coupled partial differential equations for image
             analysis applications. We show examples for object tracking
             and segmentation of electronic microscopy. We also briefly
             discuss possible uses of this framework for three
             dimensional morphing.},
   Doi = {10.1007/3-540-48236-9_5},
   Key = {fds264839}
}
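
A loose sketch of the coupled-PDE idea above: one flow morphs I1 toward I2, and the per-pixel velocity is projected onto the normal of a level-set function carrying the curve. The plain intensity-relaxation morphing flow below is an illustrative stand-in, not the authors' equation, and no reinitialization is performed.

    import numpy as np

    def morph_track(I1, I2, phi, steps=100, dt=0.2, eps=1e-8):
        I1, phi = I1.astype(float).copy(), phi.astype(float).copy()
        for _ in range(steps):
            v = I2 - I1                          # toy morphing velocity
            I1 += dt * v                         # image flow: I1 relaxes toward I2
            gy, gx = np.gradient(I1)
            py, px = np.gradient(phi)
            gmag, pmag = np.hypot(gx, gy) + eps, np.hypot(px, py) + eps
            align = (gx * px + gy * py) / (gmag * pmag)
            phi += dt * v * align * pmag         # project velocity onto curve normal
        return I1, phi

    I1, I2 = np.random.rand(64, 64), np.random.rand(64, 64)
    yy, xx = np.mgrid[0:64, 0:64].astype(float)
    I1m, phi = morph_track(I1, I2, np.hypot(xx - 32, yy - 32) - 10.0)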

@article{fds264843,
   Author = {Chung, DH and Sapiro, G},
   Title = {A windows-based user friendly system for image analysis with
             partial differential equations},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {1682},
   Pages = {453-458},
   Publisher = {SPRINGER},
   Editor = {Nielsen, M and Johansen, P and Olsen, OF and Weickert,
             J},
   Year = {1999},
   Month = {January},
   ISBN = {9783540664987},
   url = {http://www.informatik.uni-trier.de/~ley/db/conf/scalespace/scalespace1999.html},
   Abstract = {In this paper we present and briefly describe a user-friendly
             Windows system designed to assist with the analysis of
             images in general, and biomedical images in particular. The
             system, which is being made publicly available to the
             research community, implements basic 2D image analysis
             operations based on partial differential equations
             (PDE’s). The system is under continuous development, and
             already includes a large number of image enhancement and
             segmentation routines that have been tested for several
             applications.},
   Doi = {10.1007/3-540-48236-9_42},
   Key = {fds264843}
}

@article{fds264920,
   Author = {Caselles, V and Lisani, JL and Morel, JM and Sapiro,
             G},
   Title = {Shape preserving local histogram modification.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {8},
   Number = {2},
   Pages = {220-230},
   Year = {1999},
   Month = {January},
   ISSN = {1057-7149},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18267469},
   Abstract = {A novel approach for shape preserving contrast enhancement
             is presented in this paper. Contrast enhancement is achieved
             by means of a local histogram equalization algorithm which
             preserves the level-sets of the image. This basic property
             is violated by common local schemes, thereby introducing
             spurious objects and modifying the image information. The
             scheme is based on equalizing the histogram in all the
             connected components of the image, which are defined based on
             both the grey-values and the spatial relations between pixels
             in the image and which, following mathematical morphology,
             constitute the basic objects in the scene. We give examples
             for both grey-value and color images.},
   Doi = {10.1109/83.743856},
   Key = {fds264920}
}
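
The sketch below equalizes the histogram inside connected components instead of fixed windows, echoing the idea above. Taking components as connected regions of a coarsely quantized image is an illustrative simplification of the paper's morphological definition.

    import numpy as np
    from scipy import ndimage

    def equalize(values):
        # Rank-based histogram equalization of a 1-D array of gray values.
        order = np.argsort(values, kind="stable")
        ranks = np.empty(len(values))
        ranks[order] = np.arange(len(values))
        return ranks / max(len(values) - 1, 1)

    def component_equalize(img, levels=8):
        q = np.digitize(img, np.linspace(img.min(), img.max(), levels))
        out = np.zeros_like(img, dtype=float)
        for level in np.unique(q):
            labels, num = ndimage.label(q == level)   # connected components
            for lab in range(1, num + 1):
                mask = labels == lab
                out[mask] = equalize(img[mask])
        return out

    enhanced = component_equalize(np.random.rand(64, 64))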

@article{fds264922,
   Author = {Tang, B and Sapiro, G and Caselles, V},
   Title = {Direction diffusion},
   Journal = {Proceedings of the IEEE International Conference on Computer
             Vision},
   Volume = {2},
   Pages = {1245-1252},
   Publisher = {IEEE},
   Year = {1999},
   Month = {January},
   url = {http://dx.doi.org/10.1109/iccv.1999.790423},
   Abstract = {In a number of disciplines, directional data provides a
             fundamental source of information. A novel framework for
             isotropic and anisotropic diffusion of directions is
             presented in this paper. The framework can be applied both
             to regularize directional data and to obtain multiscale
             representations of it. The basic idea is to apply and extend
             results from the theory of harmonic maps in liquid crystals.
             This theory deals with the regularization of vectorial data,
             while satisfying the unit norm constraint of directional
             data. We show the corresponding variational and partial
             differential equations formulations for isotropic diffusion,
             obtained from an L2 norm, and edge preserving diffusion,
             obtained from an L1 norm. In contrast with previous
             approaches, the framework is valid for directions in any
             dimensions, supports non-smooth data, and gives both
             isotropic and anisotropic formulations. We present a number
             of theoretical results, open questions, and examples for
             gradient vectors, optical flow, and color
             images.},
   Doi = {10.1109/iccv.1999.790423},
   Key = {fds264922}
}
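
A minimal sketch of isotropic direction diffusion as described above: diffuse each component of a unit-vector field, then renormalize so the data stays on the unit circle. The explicit step size and the renormalize-after-each-step strategy are illustrative choices.

    import numpy as np

    def laplacian(f):
        # 5-point Laplacian with periodic boundary conditions.
        return (np.roll(f, 1, 0) + np.roll(f, -1, 0) +
                np.roll(f, 1, 1) + np.roll(f, -1, 1) - 4.0 * f)

    theta = 2 * np.pi * np.random.rand(64, 64)   # noisy direction field
    u, v = np.cos(theta), np.sin(theta)
    dt = 0.2
    for _ in range(100):
        u = u + dt * laplacian(u)                # heat flow on each component
        v = v + dt * laplacian(v)
        norm = np.hypot(u, v) + 1e-12
        u, v = u / norm, v / norm                # project back to the unit circle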

@article{fds264927,
   Author = {Olver, PJ and Sapiro, G and Tannenbaum, A},
   Title = {Affine invariant detection: Edge maps, anisotropic
             diffusion, and active contours},
   Journal = {Acta Applicandae Mathematicae},
   Volume = {59},
   Number = {1},
   Pages = {45-77},
   Year = {1999},
   Month = {January},
   url = {http://dx.doi.org/10.1023/A:1006295328209},
   Abstract = {In this paper we undertake a systematic investigation of
             affine invariant object detection and image denoising. Edge
             detection is first presented from the point of view of the
             affine invariant scale-space obtained by curvature based
             motion of the image level-sets. In this case, affine
             invariant maps are derived as a weighted difference of
             images at different scales. We then introduce the affine
             gradient as an affine invariant differential function of
             lowest possible order with qualitative behavior similar to
             the Euclidean gradient magnitude. These edge detectors are
             the basis for the extension of the affine invariant
             scale-space to a complete affine flow for image denoising
             and simplification, and to define affine invariant active
             contours for object detection and edge integration. The
             active contours are obtained as a gradient flow in a
             conformally Euclidean space defined by the image on which
             the object is to be detected. That is, we show that objects
             can be segmented in an affine invariant manner by computing
             a path of minimal weighted affine distance, the weight being
             given by functions of the affine edge detectors. The
             gradient path is computed via an algorithm that allows the
             simultaneous detection of any number of objects independently of
             the initial curve topology. Based on the same theory of
             affine invariant gradient flows we show that the affine
             geometric heat flow is minimizing, in an affine invariant
             form, the area enclosed by the curve.},
   Doi = {10.1023/A:1006295328209},
   Key = {fds264927}
}

@article{fds264818,
   Author = {Teo, PC and Sapiro, G and Wandell, BA},
   Title = {Anisotropic smoothing of posterior probabilities},
   Journal = {DYNAMICAL SYSTEMS, CONTROL, CODING, COMPUTER
             VISION},
   Volume = {25},
   Pages = {419-432},
   Year = {1999},
   ISBN = {3-7643-6060-7},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000084109400020&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264818}
}

@article{fds264911,
   Author = {Vazquez, L and Sapiro, G and Randall, G},
   Title = {Segmenting neurons in electronic microscopy via geometric
             tracing},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {3},
   Pages = {814-818},
   Year = {1998},
   Month = {December},
   Abstract = {In this paper we describe a system that is being used for
             the segmentation of neurons in images obtained from
             electronic microscopy. These images are extremely noisy, and
             ordinary active contours techniques detect spurious objects
             and fail to detect the neuron boundaries. The algorithm here
             described is based on combining robust anisotropic diffusion
             with minimal weighted-path computations. After the image is
             regularized via anisotropic diffusion, the user clicks
             points on the boundary of the desired object, and the
             algorithm completes the boundary between those points. This
             tracing is based on computing paths of minimal weighted
             distance, where the weight is given by the image edge
             content. Thanks to advanced numerical techniques, the
             algorithm is very fast and accurate. We compare our results
             with those obtained with PictureIt, a commercially available
             general purpose image processing package developed by
             Microsoft.},
   Key = {fds264911}
}
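
The tracing step above amounts to a shortest path in a weighted grid; in the sketch below, a plain 4-neighbor Dijkstra stands in for the paper's fast numerical algorithm, and the edge cost 1/(1 + |grad I|) is a common illustrative choice.

    import heapq
    import numpy as np

    def minimal_path(cost, start, goal):
        # Plain 4-neighbor Dijkstra: accumulate the cost of entered pixels.
        h, w = cost.shape
        dist = np.full((h, w), np.inf)
        prev = {}
        dist[start] = 0.0
        heap = [(0.0, start)]
        while heap:
            d, (i, j) = heapq.heappop(heap)
            if (i, j) == goal:
                break
            if d > dist[i, j]:
                continue
            for ni, nj in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
                if 0 <= ni < h and 0 <= nj < w and d + cost[ni, nj] < dist[ni, nj]:
                    dist[ni, nj] = d + cost[ni, nj]
                    prev[(ni, nj)] = (i, j)
                    heapq.heappush(heap, (dist[ni, nj], (ni, nj)))
        path, node = [goal], goal
        while node != start:
            node = prev[node]
            path.append(node)
        return path[::-1]

    img = np.random.rand(32, 32)
    gy, gx = np.gradient(img)
    cost = 1.0 / (1.0 + np.hypot(gx, gy))        # cheap to travel along strong edges
    trace = minimal_path(cost, (0, 0), (31, 31))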

@article{fds264912,
   Author = {Haker, S and Sapiro, G and Tannenbaum, A},
   Title = {Knowledge-based segmentation of SAR images},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {1},
   Pages = {597-601},
   Publisher = {IEEE Comput. Soc},
   Year = {1998},
   Month = {December},
   url = {http://dx.doi.org/10.1109/icip.1998.723572},
   Abstract = {A new approach for the segmentation of still and video SAR
             images is described in this paper. A priori knowledge about
             the objects present in the image, e.g., target, shadow, and
             background terrain, is introduced via Bayes' rule. Posterior
             probabilities obtained in this way are then anisotropically
             smoothed, and the image segmentation is obtained via MAP
             classifications of the smoothed data. When segmenting
             sequences of images, the smoothed posterior probabilities of
             past frames are used to learn the prior distributions in the
             succeeding frame. We show, via a large number of examples
             from public data sets, that this method provides an
             efficient and fast technique for addressing the segmentation
             of SAR data.},
   Doi = {10.1109/icip.1998.723572},
   Key = {fds264912}
}
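
A compact sketch of the pipeline described above: per-class likelihoods and priors give posterior probabilities via Bayes' rule, the posteriors are smoothed (an isotropic Gaussian here stands in for anisotropic diffusion), and MAP classification takes a pixelwise argmax. The Gaussian class models and their parameters are toy assumptions.

    import numpy as np
    from scipy.ndimage import gaussian_filter

    img = np.random.rand(64, 64)
    means = np.array([0.2, 0.5, 0.8])            # e.g., shadow / terrain / target
    sigmas = np.array([0.1, 0.1, 0.1])
    priors = np.array([0.3, 0.4, 0.3])

    lik = np.exp(-0.5 * ((img[None] - means[:, None, None]) / sigmas[:, None, None]) ** 2)
    post = priors[:, None, None] * lik
    post /= post.sum(axis=0, keepdims=True)      # Bayes' rule, per pixel

    smoothed = np.stack([gaussian_filter(p, sigma=2.0) for p in post])
    segmentation = smoothed.argmax(axis=0)       # pixelwise MAP labels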

@article{fds264913,
   Author = {Teo, PC and Sapiro, G and Wandell, B},
   Title = {Segmenting cortical gray matter for functional MRI
             visualization},
   Journal = {Proceedings of the IEEE International Conference on Computer
             Vision},
   Pages = {292-297},
   Publisher = {Narosa Publishing House},
   Year = {1998},
   Month = {December},
   url = {http://dx.doi.org/10.1109/iccv.1998.710733},
   Abstract = {We describe a system that is being used to segment gray
             matter and create connected cortical representations from
             MRI. The method exploits knowledge of the anatomy of the
             cortex and incorporates structural constraints into the
             segmentation. First, the white matter and CSF regions in the
             MR volume are segmented using some novel techniques of
             posterior anisotropic diffusion. Then, the user selects the
             cortical white matter component of interest, and its
             structure is verified by checking for cavities and handles.
             After this, a connected representation of the gray matter is
             created by a constrained growing-out from the white matter
             boundary. Because the connectivity is computed, the
             segmentation can be used as input to several methods of
             visualizing the spatial pattern of cortical activity within
             gray matter. In our case, the connected representation of
             gray matter is used to create a representation of the
             flattened cortex. Then, fMRI measurements are overlaid on
             the flattened representation, yielding a representation of
             the volumetric data within a single image.},
   Doi = {10.1109/iccv.1998.710733},
   Key = {fds264913}
}

@article{fds264914,
   Author = {Sapiro, G},
   Title = {Bilinear voting},
   Journal = {Proceedings of the IEEE International Conference on Computer
             Vision},
   Pages = {178-183},
   Publisher = {Narosa Publishing House},
   Year = {1998},
   Month = {December},
   url = {http://dx.doi.org/10.1109/iccv.1998.710716},
   Abstract = {A geometric-vision approach to solve bilinear problems in
             general, and the color constancy and illuminant estimation
             problem in particular, is presented in this paper. We show a
             general framework, based on ideas from the generalized
             (probabilistic) Hough transform, to estimate the unknown
             variables in the bilinear form. In the case of illuminant
             and reflectance estimation in natural images, each image
             pixel `votes' for possible illuminants (or reflectance), and
             the estimation is based on cumulative votes. In the general
             case, the voting is for the parameters of the bilinear
             model. The framework is natural for the introduction of
             physical constraints. For the case of illuminant estimation,
             we briefly show the relation of this work with previous
             algorithms for color constancy, and present
             examples.},
   Doi = {10.1109/iccv.1998.710716},
   Key = {fds264914}
}
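
A toy scalar version of the voting idea above: each observation o = l*r, with unknown illuminant l and reflectance r, votes for every candidate l that makes r = o/l physically valid (in [0, 1]), and the cumulative vote determines the estimate. The scalar model and the discretization are assumptions for illustration.

    import numpy as np

    rng = np.random.default_rng(0)
    true_l = 0.7
    r = rng.uniform(0.0, 1.0, size=1000)        # hidden reflectances
    obs = true_l * r                            # observed values o = l * r

    candidates = np.linspace(0.05, 1.0, 96)
    votes = np.zeros_like(candidates)
    for o in obs:
        votes += (o / candidates) <= 1.0        # feasible iff candidate l >= o
    # every l >= max(obs) is fully consistent; argmax returns the smallest
    # such candidate, approximately max(obs) = true_l * max(r), close to true_l
    estimate = candidates[np.argmax(votes)]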

@article{fds264915,
   Author = {Giblin, PJ and Sapiro, G},
   Title = {Affine invariant medial axis and skew symmetry},
   Journal = {Proceedings of the IEEE International Conference on Computer
             Vision},
   Pages = {833-838},
   Publisher = {Narosa Publishing House},
   Year = {1998},
   Month = {December},
   url = {http://dx.doi.org/10.1109/iccv.1998.710814},
   Abstract = {Affine invariant medial axes and symmetry sets of planar
             shapes are introduced and studied in this paper. Two
             different approaches are presented. The first one is based
             on affine invariant distances, and defines the symmetry set,
             a set containing the medial axis as the closure of the locus
             of points on (at least) two affine normals and
             affine-equidistant from the corresponding points on the
             curve. The second approach is based on affine bitangent
             conics. In this case the symmetry set is defined as the
             closure of the locus of centers of conics with (at least)
             three-point contact with two or more distinct points on the
             curve. This is equivalent to conic and curve having, at
             those points, the same affine tangent, or the same Euclidean
             tangent and curvature. Although the two analogous
             definitions for the classical Euclidean symmetry set (medial
             axis) are equivalent, this is not the case for the affine
             group. We then show how to use the symmetry set to detect
             affine skew symmetry, proving that the contact based
             symmetry set is a straight line if and only if the given
             shape is the affine transformation of a symmetric
             object.},
   Doi = {10.1109/iccv.1998.710814},
   Key = {fds264915}
}

@article{fds264917,
   Author = {Bertalmio, M and Sapiro, G and Randall, G},
   Title = {Morphing active contours: a geometric approach to
             topology-independent image segmentation and
             tracking},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {3},
   Pages = {318-322},
   Year = {1998},
   Month = {December},
   Abstract = {A method for deforming curves in a given image to a desired
             position in a second image is introduced in this paper. The
             algorithm is based on deforming the first image toward the
             second one via a partial differential equation, while
             tracking the deformation of the curves of interest in the
             first image with an additional, coupled, partial
             differential equation. The technique can be applied to
             object tracking and slice-by-slice segmentation of 3D data.
             The topology of the deforming curve can change, without any
             special topology handling procedures added to the scheme.
             This permits, for example, the automatic tracking of scenes
             where, due to occlusions, the topology of the objects of
             interest changes from frame to frame.},
   Key = {fds264917}
}

@article{fds264919,
   Author = {Weinberger, M and Seroussi, G and Sapiro, G},
   Title = {LOCO-I lossless image compression algorithm: Principles and
             standardization into JPEG-LS},
   Number = {HPL-98-193},
   Pages = {1-31},
   Year = {1998},
   Month = {November},
   Abstract = {LOCO-I (LOw COmplexity LOssless COmpression for Images) is
             the algorithm at the core of the new ISO/ITU standard for
             lossless and near-lossless compression of continuous-tone
             images, JPEG-LS. It is conceived as a `low complexity
             projection' of the universal context modeling paradigm,
             matching its modeling unit to a simple coding unit. By
             combining simplicity with the compression potential of
             context models, the algorithm `enjoys the best of both
             worlds'. It is based on a simple fixed context model, which
             approaches the capability of the more complex universal
             techniques for capturing high-order dependencies. The model
             is tuned for efficient performance in conjunction with an
             extended family of Golomb-type codes, which are adaptively
             chosen, and an embedded alphabet extension for coding of
             low-entropy image regions. LOCO-I attains compression ratios
             similar or superior to those obtained with state-of-the-art
             schemes based on arithmetic coding. Moreover, it is within a
             few percentage points of the best available compression
             ratios, at a complexity level estimated at an order of
             magnitude lower. We discuss the principles underlying the
             design of LOCO-I, and its standardization into
             JPEG-LS.},
   Key = {fds264919}
}
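
Two ingredients of LOCO-I are easy to sketch: the median edge detector (MED) predictor and a Golomb-Rice code for the mapped prediction residuals. Context modeling, parameter adaptation, and the alphabet extension are omitted here, and the fixed Rice parameter k is an illustrative assumption.

    import numpy as np

    def med_predict(a, b, c):
        # a = left, b = above, c = upper-left neighbor (the MED predictor).
        if c >= max(a, b):
            return min(a, b)
        if c <= min(a, b):
            return max(a, b)
        return a + b - c

    def rice_encode(n, k):
        # Map a signed residual to unsigned, then unary quotient + k-bit remainder.
        u = 2 * n if n >= 0 else -2 * n - 1
        q, r = u >> k, u & ((1 << k) - 1)
        return "1" * q + "0" + format(r, "0{}b".format(k))

    img = np.random.randint(0, 256, (8, 8))
    bits = []
    for i in range(1, 8):
        for j in range(1, 8):
            pred = med_predict(int(img[i, j - 1]), int(img[i - 1, j]), int(img[i - 1, j - 1]))
            bits.append(rice_encode(int(img[i, j]) - pred, k=4))
    stream = "".join(bits)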

@article{fds264809,
   Author = {Caselles, V and Morel, J},
   Title = {Introduction to the special issue on partial differential
             equations and geometry-driven diffusion in image processing
             and analysis.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {7},
   Number = {3},
   Pages = {269-273},
   Year = {1998},
   Month = {January},
   ISSN = {1057-7149},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000072202300001&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.1109/tip.1998.661176},
   Key = {fds264809}
}

@article{fds264909,
   Author = {Black, MJ and Sapiro, G and Marimont, DH and Heeger,
             D},
   Title = {Robust anisotropic diffusion.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {7},
   Number = {3},
   Pages = {421-432},
   Year = {1998},
   Month = {January},
   ISSN = {1057-7149},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18276262},
   Abstract = {Relations between anisotropic diffusion and robust
             statistics are described in this paper. Specifically, we
             show that anisotropic diffusion can be seen as a robust
             estimation procedure that estimates a piecewise smooth image
             from a noisy input image. The "edge-stopping" function in
             the anisotropic diffusion equation is closely related to the
             error norm and influence function in the robust estimation
             framework. This connection leads to a new "edge-stopping"
             function based on Tukey's biweight robust estimator that
             preserves sharper boundaries than previous formulations and
             improves the automatic stopping of the diffusion. The robust
             statistical interpretation also provides a means for
             detecting the boundaries (edges) between the piecewise
             smooth regions in an image that has been smoothed with
             anisotropic diffusion. Additionally, we derive a
             relationship between anisotropic diffusion and
             regularization with line processes. Adding constraints on
             the spatial organization of the line processes allows us to
             develop new anisotropic diffusion equations that result in a
             qualitative improvement in the continuity of
             edges.},
   Doi = {10.1109/83.661192},
   Key = {fds264909}
}
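
A minimal sketch of one robust diffusion step as described above: 4-neighbor differences weighted by a Tukey biweight edge-stopping function. The scale sigma and the step size are illustrative constants rather than the paper's tuned values.

    import numpy as np

    def tukey(x, sigma):
        # Tukey biweight edge-stopping function (zero beyond sigma).
        w = np.zeros_like(x)
        m = np.abs(x) <= sigma
        w[m] = (1.0 - (x[m] / sigma) ** 2) ** 2
        return w

    def diffuse(img, sigma=0.2, dt=0.2, steps=50):
        u = img.astype(float).copy()
        for _ in range(steps):
            flux = np.zeros_like(u)
            for axis in (0, 1):
                for shift in (1, -1):
                    d = np.roll(u, shift, axis=axis) - u   # neighbor difference
                    flux += tukey(d, sigma) * d            # robustly weighted
            u += dt * 0.25 * flux
        return u

    smoothed = diffuse(np.random.rand(64, 64))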

@article{fds264910,
   Author = {Giblin, PJ and Sapiro, G},
   Title = {Affine-Invariant Distances, Envelopes and Symmetry
             Sets},
   Journal = {Geometriae Dedicata},
   Volume = {71},
   Number = {3},
   Pages = {237-261},
   Year = {1998},
   Month = {January},
   url = {http://dx.doi.org/10.1023/A:1005099011913},
   Abstract = {Affine invariant symmetry sets of planar curves are
             introduced and studied in this paper. Two different
             approaches are investigated. The first one is based on
             affine invariant distances, and defines the symmetry set as
             the closure of the locus of points on (at least) two affine
             normals and affine-equidistant from the corresponding points
             on the curve. The second approach is based on affine
             bitangent conics. In this case the symmetry set is defined
             as the closure of the locus of centers of conics with (at
             least) 3-point contact with the curve at two or more
             distinct points on the curve. This is equivalent to conic
             and curve having, at those points, the same affine tangent,
             or the same Euclidean tangent and curvature. Although the
             two analogous definitions for the classical Euclidean
             symmetry set are equivalent, this is not the case for the
             affine group. We present a number of properties of both
             affine symmetry sets, showing their similarities with and
             differences from the Euclidean case. We conclude the paper
             with a discussion of possible extensions to higher
             dimensions and other transformation groups, as well as to
             invariant Voronoi diagrams.},
   Doi = {10.1023/A:1005099011913},
   Key = {fds264910}
}

@article{fds264916,
   Author = {Angenent, S and Sapiro, G and Tannenbaum, A},
   Title = {On the affine heat equation for non-convex
             curves},
   Journal = {Journal of the American Mathematical Society},
   Volume = {11},
   Number = {3},
   Pages = {601-634},
   Year = {1998},
   Month = {January},
   url = {http://dx.doi.org/10.1090/s0894-0347-98-00262-8},
   Doi = {10.1090/s0894-0347-98-00262-8},
   Key = {fds264916}
}

@article{fds264896,
   Author = {Caselles, V and Kimmel, R and Sapiro, G and Sbert,
             C},
   Title = {Minimal surfaces based object segmentation},
   Journal = {IEEE Transactions on Pattern Analysis and Machine
             Intelligence},
   Volume = {19},
   Number = {4},
   Pages = {394-398},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1997},
   Month = {December},
   ISSN = {0162-8828},
   url = {http://dx.doi.org/10.1109/34.588023},
   Abstract = {A geometric approach for 3D object segmentation and
             representation is presented. The segmentation is obtained by
             deformable surfaces moving towards the objects to be
             detected in the 3D image. The model is based on curvature
             motion and the computation of surfaces of minimal area,
             better known as minimal surfaces. The space where the
             surfaces are computed is induced from the 3D image
             (volumetric data) in which the objects are to be detected.
             The model links classical deformable surfaces
             obtained via energy minimization with intrinsic ones derived
             from curvature based flows. The new approach is stable,
             robust, and automatically handles changes in the surface
             topology during the deformation. © 1997
             IEEE.},
   Doi = {10.1109/34.588023},
   Key = {fds264896}
}

@article{fds264897,
   Author = {Teo, PC and Sapiro, G and Wandell, BA},
   Title = {Creating connected representations of cortical gray matter
             for functional MRI visualization.},
   Journal = {IEEE transactions on medical imaging},
   Volume = {16},
   Number = {6},
   Pages = {852-863},
   Year = {1997},
   Month = {December},
   ISSN = {0278-0062},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/9533585},
   Abstract = {We describe a system that is being used to segment gray
             matter from magnetic resonance imaging (MRI) and to create
             connected cortical representations for functional MRI
             visualization (fMRI). The method exploits knowledge of the
             anatomy of the cortex and incorporates structural
             constraints into the segmentation. First, the white matter
             and cerebral spinal fluid (CSF) regions in the MR volume are
             segmented using novel techniques of posterior anisotropic
             diffusion. Then, the user selects the cortical white matter
             component of interest, and its structure is verified by
             checking for cavities and handles. After this, a connected
             representation of the gray matter is created by a
             constrained growing-out from the white matter boundary.
             Because the connectivity is computed, the segmentation can
             be used as input to several methods of visualizing the
             spatial pattern of cortical activity within gray matter. In
             our case, the connected representation of gray matter is
             used to create a flattened representation of the cortex.
             Then, fMRI measurements are overlaid on the flattened
             representation, yielding a representation of the volumetric
             data within a single image. The software is freely available
             to the research community.},
   Doi = {10.1109/42.650881},
   Key = {fds264897}
}

@article{fds264898,
   Author = {Black, M and Sapiro, G and Marimont, D and Heeger,
             D},
   Title = {Robust anisotropic diffusion and sharpening of scalar and
             vector images},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {1},
   Pages = {263-266},
   Publisher = {IEEE Comput. Soc},
   Year = {1997},
   Month = {December},
   url = {http://dx.doi.org/10.1109/icip.1997.647755},
   Abstract = {Relations between anisotropic diffusion and robust
             statistics are described in this paper. We show that
             anisotropic diffusion can be seen as a robust estimation
             procedure that estimates a piecewise smooth image from a
             noisy input image. The `edge-stopping' function in the
             anisotropic diffusion equation is closely related to the
             error norm and influence function in the robust estimation
             framework. This connection leads to a new `edge-stopping'
             function based on Tukey's biweight robust estimator, that
             preserves sharper boundaries than previous formulations and
             improves the automatic stopping of the diffusion. The robust
             statistical interpretation also provides a means for
             detecting the boundaries (edges) between the piecewise
             smooth regions in the image. We extend the framework to
             vector-valued images and show applications to robust image
             sharpening.},
   Doi = {10.1109/icip.1997.647755},
   Key = {fds264898}
}

@article{fds264899,
   Author = {Caselles, V and Lisani, JL and Morel, JM and Sapiro,
             G},
   Title = {Shape preserving local contrast enhancement},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {1},
   Pages = {314-317},
   Publisher = {IEEE Comput. Soc},
   Year = {1997},
   Month = {December},
   url = {http://dx.doi.org/10.1109/icip.1997.647769},
   Abstract = {A novel approach for shape preserving contrast enhancement
             is presented in this paper. Contrast enhancement is achieved
             by means of a local histogram equalization algorithm which
             preserves the level-sets of the image. This basic property
             is violated by common local schemes, thereby introducing
             spurious objects and modifying the image information. The
             scheme is based on equalizing the histogram in all the
             connected components of the image, which are defined based
             on the image grey-values and spatial relations between its
             pixels. Following mathematical morphology, these constitute
             the basic objects in the scene. We give examples for both
             grey-valued and color images.},
   Doi = {10.1109/icip.1997.647769},
   Key = {fds264899}
}

@article{fds264901,
   Author = {Teo, PC and Sapiro, G and Wandell, BA},
   Title = {Anisotropic smoothing of posterior probabilities},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {1},
   Pages = {675-678},
   Publisher = {IEEE Comput. Soc},
   Year = {1997},
   Month = {December},
   url = {http://dx.doi.org/10.1109/icip.1997.648003},
   Abstract = {Recently, we proposed an efficient image segmentation
             technique that anisotropically smoothes the homogeneous
             posterior probabilities before independent pixelwise MAP
             classification is carried out. In this paper, we develop the
             mathematical theory underlying the technique. We demonstrate
             that prior anisotropic smoothing of the posterior
             probabilities yields the MAP solution of a discrete MRF with
             a non-interacting, analog discontinuity field. In contrast,
             isotropic smoothing of the posterior probabilities is
             equivalent to computing the MAP solution of a single,
             discrete MRF using continuous relaxation labeling. Combining
             a discontinuity field with a discrete MRF is important as it
             allows the disabling of clique potentials across
             discontinuities. Furthermore, explicit representation of the
             discontinuity field suggests new algorithms that incorporate
             properties like hysteresis and non-maximal
             suppression.},
   Doi = {10.1109/icip.1997.648003},
   Key = {fds264901}
}

@article{fds264906,
   Author = {Ringach, DL and Sapiro, G and Shapley, R},
   Title = {A subspace reverse-correlation technique for the study of
             visual neurons.},
   Journal = {Vision research},
   Volume = {37},
   Number = {17},
   Pages = {2455-2464},
   Year = {1997},
   Month = {September},
   ISSN = {0042-6989},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/9381680},
   Abstract = {A new discrete-time reverse-correlation scheme for the study
             of visual neurons is proposed. The visual stimulus is
             generated by drawing with uniform probability, at each
             refresh time, an image from a finite set S of orthonormal
             images. We show that if the neuron can be modeled as a
             spatiotemporal linear filter followed by a static
             nonlinearity, the cross-correlation between the input image
             sequence and the cell's spike train output gives the
             projection of the receptive field onto the subspace spanned
             by S. The technique has been applied to the analysis of
             simple cells in the primary visual cortex of cats and
             macaque monkeys. Experimental results are presented where S
             spans a subspace of spatially low-pass signals. Advantages
             of the proposed scheme over standard white-noise techniques
             include improved signal to noise ratios, increased spatial
             resolution, and the possibility of restricting the study to
             particular subspaces of interest.},
   Doi = {10.1016/s0042-6989(96)00247-7},
   Key = {fds264906}
}
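
A small simulation of the scheme above: stimuli are drawn uniformly from an orthonormal set (each basis image and its negative), the model neuron is a linear filter followed by a static nonlinearity, and the spike-triggered average recovers a vector proportional to the receptive field's projection onto the stimulus subspace. The ReLU nonlinearity and Poisson spiking are assumptions of the toy model.

    import numpy as np

    rng = np.random.default_rng(1)
    dim, nbasis, T = 64, 16, 50000
    basis = np.linalg.qr(rng.standard_normal((dim, dim)))[0][:, :nbasis]
    rf = rng.standard_normal(dim)                 # hidden receptive field

    idx = rng.integers(0, nbasis, size=T)         # which basis image is shown
    signs = rng.choice([-1.0, 1.0], size=T)       # each image and its negative
    drive = signs * (basis.T @ rf)[idx]           # linear stage
    spikes = rng.poisson(np.maximum(drive, 0.0))  # static nonlinearity + spiking

    # cross-correlation of the stimuli with the spike train (spike-triggered average)
    sta = (basis[:, idx] * (signs * spikes)).sum(axis=1) / T
    proj = basis @ (basis.T @ rf)                 # projection of rf onto span(S)
    # sta is proportional to proj (compare them after normalizing each to unit norm)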

@article{fds264772,
   Author = {Sapiro, G and Simonin, A},
   Title = {Les Editions de Minuit 1942-1955. Le devoir
             d'insoumission},
   Journal = {Le Mouvement social},
   Number = {180},
   Pages = {244-244},
   Publisher = {JSTOR},
   Year = {1997},
   Month = {July},
   ISSN = {0027-2671},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1997XY37900042&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.2307/3779380},
   Key = {fds264772}
}

@article{fds264798,
   Author = {Sapiro, G},
   Title = {Les conditions professionnelles d'une mobilisation reussie:
             le Comite national des ecrivains},
   Journal = {Le Mouvement social},
   Number = {180},
   Pages = {179-179},
   Publisher = {JSTOR},
   Year = {1997},
   Month = {July},
   ISSN = {0027-2671},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1997XY37900011&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.2307/3779354},
   Key = {fds264798}
}

@article{fds264904,
   Author = {Sapiro, G and Caselles, V},
   Title = {Histogram Modification via Differential Equations},
   Journal = {Journal of Differential Equations},
   Volume = {135},
   Number = {2},
   Pages = {238-268},
   Publisher = {Elsevier BV},
   Year = {1997},
   Month = {April},
   url = {http://dx.doi.org/10.1006/jdeq.1996.3237},
   Abstract = {The explicit use of partial differential equations (PDEs) in
             image processing became a major research topic in recent
             years. In this work we present a framework for histogram
             (pixel-value distribution) modification via ordinary and
             partial differential equations. In this way, the image
             contrast is improved. We show that the histogram can be
             modified to achieve any given distribution as the steady
             state solution of an image flow. The contrast modification
             can be performed while simultaneously reducing noise in a
             unique PDE, avoiding noise sharpening effects of classical
             algorithms. The approach is extended to local contrast
             enhancement as well. A variational interpretation of the
             flow is presented and theoretical results on the existence
             of solutions are given. © 1997 Academic
             Press.},
   Doi = {10.1006/jdeq.1996.3237},
   Key = {fds264904}
}
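
A discrete-time caricature of "equalization as the steady state of an image flow": relax the image toward its rank (empirical cdf) map, whose fixed point has a uniform histogram. This explicit relaxation illustrates the idea, not the paper's exact equation.

    import numpy as np

    def equalization_flow(img, steps=50, dt=0.5):
        u = img.astype(float).copy()
        N = u.size
        for _ in range(steps):
            order = np.argsort(u, axis=None, kind="stable")
            rank = np.empty(N)
            rank[order] = np.arange(1, N + 1) / N     # empirical cdf of u
            u += dt * (rank.reshape(u.shape) - u)     # u_t = cdf(u(x)) - u(x)
        return u

    flat = equalization_flow(np.random.rand(64, 64) ** 2)  # ends ~uniform histogram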

@article{fds264903,
   Author = {Caselles, V and Lisani, JL and Morel, JM and Sapiro,
             G},
   Title = {Shape preserving local histogram modification},
   Number = {97-58},
   Pages = {1-20},
   Year = {1997},
   Month = {April},
   Abstract = {A novel approach for shape preserving contrast enhancement
             is presented in this paper. Contrast enhancement is achieved
             by means of a local histogram equalization algorithm which
             preserves the level-sets of the image. This basic property
             is violated by common local schemes, thereby introducing
             spurious objects and modifying the image information. The
             scheme is based on equalizing the histogram in all the
             connected components of the image, which are defined based on
             both the grey-values and the spatial relations between pixels
             in the image and which, following mathematical morphology,
             constitute the basic objects in the scene. We give examples
             for both grey-value and color images.},
   Key = {fds264903}
}

@article{fds264900,
   Author = {Pollick, FE and Sapiro, G},
   Title = {Constant affine velocity predicts the 1/3 power law of
             planar motion perception and generation.},
   Journal = {Vision research},
   Volume = {37},
   Number = {3},
   Pages = {347-353},
   Year = {1997},
   Month = {February},
   ISSN = {0042-6989},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/9135867},
   Abstract = {Numerous studies have shown that the power of 1/3 is
             important in relating Euclidean velocity to radius of
             curvature (R) in the generation and perception of planar
             movement. Although the relation between velocity and
             curvature is clear and very intuitive, no valid explanation
             for the specific 1/3 value has yet been found. We show that
             if instead of computing the Euclidean velocity we compute
             the affine one, a velocity which is invariant to affine
             transformations, then we obtain that the unique function of
             R which will give (constant) affine invariant velocity is
             precisely R^(1/3). This means that the 1/3 power law,
             experimentally found in the studies of hand-drawing and
             planar motion perception, implies motion at constant affine
             velocity. Since drawing/perceiving at constant affine
             velocity implies that curves of equal affine length will be
             drawn in equal time, we performed an experiment to further
             support this result. Results showed agreement between the
             1/3 power law and drawing at constant affine velocity.
             Possible reasons for the appearance of affine
             transformations in the generation and perception of planar
             movement are discussed.},
   Doi = {10.1016/s0042-6989(96)00116-2},
   Key = {fds264900}
}
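
The claim that constant affine velocity forces Euclidean speed v proportional to R^(1/3) can be checked numerically; the sketch below does so on an ellipse, where the ratio v / R^(1/3) should be constant along the curve.

    import numpy as np

    t = np.linspace(0, 2 * np.pi, 2000, endpoint=False)
    a, b = 2.0, 1.0
    x, y = a * np.cos(t), b * np.sin(t)
    xp, yp = -a * np.sin(t), b * np.cos(t)            # first derivatives
    xpp, ypp = -a * np.cos(t), -b * np.sin(t)         # second derivatives

    speed_e = np.hypot(xp, yp)                        # Euclidean speed in t
    affine_speed = (xp * ypp - yp * xpp) ** (1.0 / 3.0)
    v = speed_e / affine_speed                        # speed per unit affine length
    R = speed_e ** 3 / (xp * ypp - yp * xpp)          # radius of curvature

    ratio = v / R ** (1.0 / 3.0)
    print(ratio.std() / ratio.mean())                 # ~0: v proportional to R^(1/3)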

@article{fds376577,
   Author = {Black, MJ and Sapiro, G and Marimont, D and Heeger,
             D},
   Title = {Robust anisotropic diffusion: Connections between robust
             statistics, line processing, and anisotropic
             diffusion},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {1252},
   Pages = {323-326},
   Year = {1997},
   Month = {January},
   ISBN = {9783540631675},
   url = {http://dx.doi.org/10.1007/3-540-63167-4_27},
   Abstract = {Relations between anisotropic diffusion and robust
             statistics are described in this paper. We show that
             anisotropic diffusion can be seen as a robust estimation
             procedure that estimates a piecewise smooth image from a
             noisy input image. The "edge-stopping" function in the
             anisotropic diffusion equation is closely related to the
             error norm and influence function in the robust estimation
             framework. This connection leads to a new "edge-stopping"
             function based on Tukey's biweight robust estimator that
             preserves sharper boundaries than previous formulations and
             improves the automatic stopping of the diffusion. The robust
             statistical interpretation also provides a means for
             detecting the boundaries (edges) between the piecewise
             smooth regions in the image. Finally, connections between
             robust estimation and line processing provide a framework to
             introduce spatial coherence in anisotropic diffusion
             flows.},
   Doi = {10.1007/3-540-63167-4_27},
   Key = {fds376577}
}

@article{fds264894,
   Author = {Sapiro, G and Cohen, A and Bruckstein, AM},
   Title = {A Subdivision Scheme for Continuous-Scale B-Splines and
             Affine-Invariant Progressive Smoothing},
   Journal = {Journal of Mathematical Imaging and Vision},
   Volume = {7},
   Number = {1},
   Pages = {23-40},
   Year = {1997},
   Month = {January},
   ISSN = {0924-9907},
   url = {http://dx.doi.org/10.1023/A:1008261923192},
   Abstract = {Multiscale representations and progressive smoothing
             constitute an important topic in fields such as
             computer vision, CAGD, and image processing. In this work, a
             multiscale representation of planar shapes is first
             described. The approach is based on computing classical
             B-splines of increasing orders, and therefore is
             automatically affine invariant. The resulting representation
             satisfies basic scale-space properties at least in a
             qualitative form, and is simple to implement. The
             representation obtained in this way is discrete in scale,
             since classical B-splines are functions in C^(k-2), where k
             is an integer greater than or equal to two. We present a
             subdivision scheme for the computation of B-splines of
             finite support at continuous scales. With this scheme,
             B-spline representations in C^r are obtained for any real r
             in [0, ∞), and the multiscale representation is extended
             to continuous scale. The proposed progressive smoothing
             receives a discrete set of points as initial shape, while
             the smoothed curves are represented by continuous
             (analytical) functions, allowing a straightforward
             computation of geometric characteristics of the
             shape.},
   Doi = {10.1023/A:1008261923192},
   Key = {fds264894}
}
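
The integer-order part of the construction can be sketched with the classical Lane-Riesenfeld subdivision, which the paper's continuous-scale scheme generalizes: double the control points, then average k times to refine toward a smoother B-spline. The specific polygon and the number of subdivision rounds are illustrative.

    import numpy as np

    def lane_riesenfeld(points, k, rounds=4):
        # points: (n, 2) closed polygon; larger k gives a smoother limit curve.
        p = np.asarray(points, dtype=float)
        for _ in range(rounds):
            p = np.repeat(p, 2, axis=0)                  # doubling step
            for _ in range(k):
                p = 0.5 * (p + np.roll(p, -1, axis=0))   # averaging step
        return p

    square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    quadratic = lane_riesenfeld(square, k=2)   # quadratic B-spline limit
    cubic = lane_riesenfeld(square, k=3)       # cubic: smoother still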

@article{fds264895,
   Author = {Teo, PC and Sapiro, G and Wandell, BA},
   Title = {Anatomically consistent segmentation of the human cortex for
             functional MRI visualization},
   Journal = {HP Laboratories Technical Report},
   Number = {97-3},
   Pages = {1-21},
   Year = {1997},
   Month = {January},
   Abstract = {We describe a system that is being used to segment gray
             matter from volumetric representations of the human cortex
             obtained using magnetic resonance imaging. The segmentation
             algorithm identifies gray matter voxels and computes their
             connectivity. The method differs from existing schemes in
             that it exploits knowledge of the anatomy of human cortex
             and produces anatomically consistent segmentations. The
             method is based on a novel and computationally efficient
             technique of incorporating structural constraints into the
             segmentation algorithm. Because the gray matter segmentation
             is anatomically consistent, it can be used together with
             functional magnetic resonance imaging measurements to
             visualize the spatial pattern of cortical activity within
             the gray matter.},
   Key = {fds264895}
}

@article{fds264902,
   Author = {Olver, PJ and Sapiro, G and Tannenbaum, A},
   Title = {Invariant geometric evolutions of surfaces and volumetric
             smoothing},
   Journal = {SIAM Journal on Applied Mathematics},
   Volume = {57},
   Number = {1},
   Pages = {176-194},
   Publisher = {Society for Industrial & Applied Mathematics
             (SIAM)},
   Year = {1997},
   Month = {January},
   url = {http://dx.doi.org/10.1137/s0036139994266311},
   Abstract = {The study of geometric flows for smoothing, multiscale
             representation, and analysis of two- and three-dimensional
             objects has received much attention in the past few years.
             In this paper, we first survey the geometric smoothing of
             curves and surfaces via geometric heat-type flows, which are
             invariant under the groups of Euclidean and affine motions.
             Second, using the general theory of differential invariants,
             we determine the general formula for a geometric
             hypersurface evolution which is invariant under a prescribed
             symmetry group. As an application, we present the simplest
             affine invariant flow for (convex) surfaces in
             three-dimensional space, which, like the affine-invariant
             curve shortening flow, will be of fundamental importance in
             the processing of three-dimensional images.},
   Doi = {10.1137/s0036139994266311},
   Key = {fds264902}
}

@article{fds264905,
   Author = {Caselles, V and Kimmel, R and Sapiro, G and Sbert,
             C},
   Title = {Minimal surfaces: A geometric three dimensional segmentation
             approach},
   Journal = {Numerische Mathematik},
   Volume = {77},
   Number = {4},
   Pages = {423-451},
   Publisher = {Springer Nature},
   Year = {1997},
   Month = {January},
   url = {http://dx.doi.org/10.1007/s002110050294},
   Abstract = {A novel geometric approach for three dimensional object
             segmentation is presented. The scheme is based on geometric
             deformable surfaces moving towards the objects to be
             detected. We show that this model is related to the
             computation of surfaces of minimal area (local minimal
             surfaces). The space where these surfaces are computed is
             induced from the three dimensional image in which the
             objects are to be detected. The general approach also shows
             the relation between classical deformable surfaces obtained
             via energy minimization and geometric ones derived from
             curvature flows in the surface evolution framework. The
             scheme is stable, robust, and automatically handles changes
             in the surface topology during the deformation. Results
             related to existence, uniqueness, stability, and correctness
             of the solution to this geometric deformable model are
             presented as well. Based on an efficient numerical algorithm
             for surface evolution, we present a number of examples of
             object detection in real and synthetic images.},
   Doi = {10.1007/s002110050294},
   Key = {fds264905}
}

@article{fds264907,
   Author = {Sapiro, G},
   Title = {Color Snakes},
   Journal = {Computer Vision and Image Understanding},
   Volume = {68},
   Number = {2},
   Pages = {247-253},
   Publisher = {Elsevier BV},
   Year = {1997},
   Month = {January},
   url = {http://dx.doi.org/10.1006/cviu.1997.0562},
   Abstract = {A framework for object segmentation in vector-valued images
             is presented in this paper. The first scheme proposed is
             based on geometric active contours moving toward the objects
             to be detected in the vector-valued image. Object boundaries
             are obtained as geodesics or minimal weighted-distance
             curves, where the metric is given by a definition of edges
             in vector-valued data. The curve flow corresponding to the
             proposed active contours holds formal existence, uniqueness,
             stability, and correctness results. The scheme automatically
             handles changes in the deforming curve topology. The
             technique is applicable, for example, to color and texture
             images as well as multiscale representations. We then
             present an extension of these vector active contours,
             proposing a possible image flow for vector-valued image
             segmentation. The algorithm is based on moving each one of
             the image level sets according to the proposed vector active
             contours. This extension also shows the relation between
             active contours and a number of partial-differential-equation-based
             image processing algorithms, such as anisotropic diffusion and
             shock filters. © 1997 Academic Press.},
   Doi = {10.1006/cviu.1997.0562},
   Key = {fds264907}
}
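
The metric for vector-valued data can be illustrated with the standard Di Zenzo construction: sum the per-channel first fundamental forms into a structure tensor and take its largest eigenvalue as edge strength. The paper's exact edge function may differ; this is a common concrete choice.

    import numpy as np

    def vector_edge_strength(img):
        # img: (H, W, C). Sum the per-channel first fundamental forms.
        g11 = g12 = g22 = 0.0
        for c in range(img.shape[2]):
            gy, gx = np.gradient(img[..., c])
            g11 = g11 + gx * gx
            g12 = g12 + gx * gy
            g22 = g22 + gy * gy
        tr, det = g11 + g22, g11 * g22 - g12 ** 2
        lam_max = 0.5 * (tr + np.sqrt(np.maximum(tr ** 2 - 4 * det, 0.0)))
        return np.sqrt(lam_max)                 # largest eigenvalue = edge strength

    edges = vector_edge_strength(np.random.rand(64, 64, 3))
    metric = 1.0 / (1.0 + edges ** 2)           # small where vector edges are strong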

@article{fds264908,
   Author = {Sapiro, G and Caselles, V},
   Title = {Contrast Enhancement via Image Evolution
             Flows},
   Journal = {Graphical Models and Image Processing},
   Volume = {59},
   Number = {6},
   Pages = {407-416},
   Publisher = {Elsevier BV},
   Year = {1997},
   Month = {January},
   url = {http://dx.doi.org/10.1006/gmip.1997.0446},
   Abstract = {A framework for contrast enhancement via image evolution
             flows and variational formulations is introduced in this
             paper. First, an algorithm for histogram modification via
             image evolution equations is presented. We show that the
             image histogram can be modified to achieve any given
             distribution as the steady state solution of this
             differential equation. We then prove that the proposed
             evolution equation solves an energy minimization problem.
             This gives a new interpretation to histogram modification
             and contrast enhancement in general. This interpretation is
             completely formulated in the image domain, in contrast with
             classical techniques for histogram modification which are
             formulated in a probabilistic domain. From this, new
             algorithms for contrast enhancement, including, for example,
             image and perception models, can be derived. Based on the
             energy formulation and its corresponding differential form,
             we show that the proposed histogram modification algorithm
             can be combined with image regularization schemes. This
             allows us to perform simultaneous contrast enhancement and
             denoising, avoiding common noise sharpening effects in
             classical schemes. Theoretical results regarding the
             existence of solutions to the proposed equations are
             presented. © 1997 Academic Press.},
   Doi = {10.1006/gmip.1997.0446},
   Key = {fds264908}
}

@article{Caselles1997,
   Author = {Caselles, V and Kimmel, R and Sapiro, G},
   Title = {Geodesic Active Contours},
   Journal = {International Journal of Computer Vision},
   Volume = {22},
   Number = {1},
   Pages = {61-79},
   Year = {1997},
   Month = {January},
   ISSN = {0920-5691},
   url = {http://dx.doi.org/10.1023/A:1007979827043},
   Abstract = {A novel scheme for the detection of object boundaries is
             presented. The technique is based on active contours
             evolving in time according to intrinsic geometric measures
             of the image. The evolving contours naturally split and
             merge, allowing the simultaneous detection of several
             objects and both interior and exterior boundaries. The
             proposed approach is based on the relation between active
             contours and the computation of geodesics or minimal
             distance curves. The minimal distance curve lies in a
             Riemannian space whose metric is defined by the image
             content. This geodesic approach for object segmentation
             allows us to connect classical "snakes" based on energy
             minimization and geometric active contours based on the
             theory of curve evolution. Previous models of geometric
             active contours are improved, allowing stable boundary
             detection when their gradients suffer from large variations,
             including gaps. Formal results concerning existence,
             uniqueness, stability, and correctness of the evolution are
             presented as well. The scheme was implemented using an
             efficient algorithm for curve evolution. Experimental
             results of applying the scheme to real images including
             objects with holes and medical data imagery demonstrate its
             power. The results may be extended to 3D object segmentation
             as well.},
   Doi = {10.1023/A:1007979827043},
   Key = {Caselles1997}
}
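
A bare-bones level-set sketch of the geodesic active contour evolution, phi_t = g(I)|grad phi|(kappa + c) + grad g . grad phi, with the common choice g = 1/(1 + |grad I|^2). The balloon constant c and the step sizes are illustrative, and reinitialization and narrow-band machinery are omitted.

    import numpy as np

    def evolve_gac(img, phi, c=0.5, dt=0.1, steps=200, eps=1e-8):
        gy, gx = np.gradient(img)
        g = 1.0 / (1.0 + gx ** 2 + gy ** 2)       # edge-stopping metric g(I)
        g_y, g_x = np.gradient(g)
        phi = phi.astype(float).copy()
        for _ in range(steps):
            py, px = np.gradient(phi)
            mag = np.hypot(px, py) + eps
            kyy, _ = np.gradient(py / mag)        # d/dy of normalized y-component
            _, kxx = np.gradient(px / mag)        # d/dx of normalized x-component
            kappa = kxx + kyy                     # curvature: div(grad phi / |grad phi|)
            phi += dt * (g * mag * (kappa + c) + g_x * px + g_y * py)
        return phi

    img = np.random.rand(64, 64)
    yy, xx = np.mgrid[0:64, 0:64].astype(float)
    phi = evolve_gac(img, np.hypot(xx - 32, yy - 32) - 25.0)  # contour: {phi = 0}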

@article{fds264775,
   Author = {Sapiro, G},
   Title = {The Vichy government - French - Baruch,MO},
   Journal = {ACTES DE LA RECHERCHE EN SCIENCES SOCIALES},
   Number = {119},
   Pages = {71-71},
   Year = {1997},
   ISSN = {0335-5322},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1997XV39400014&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264775}
}

@article{fds264823,
   Author = {Sapiro, G},
   Title = {'Alea' - French - Kjaerstad,J},
   Journal = {ACTES DE LA RECHERCHE EN SCIENCES SOCIALES},
   Number = {118},
   Pages = {5-5},
   Year = {1997},
   ISSN = {0335-5322},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1997XH39400004&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264823}
}

@article{fds264834,
   Author = {Black, MJ and Sapiro, G and Marimont, D and Heeger,
             D},
   Title = {Robust anisotropic diffusion: Connections between robust
             statistics, line processing, and anisotropic
             diffusion},
   Journal = {SCALE-SPACE THEORY IN COMPUTER VISION},
   Volume = {1252},
   Pages = {323-326},
   Publisher = {SPRINGER},
   Editor = {Romeny, BMTH and Florack, L and Koenderink, JJ and Viergever,
             MA},
   Year = {1997},
   ISBN = {9783540631675},
   url = {http://www.informatik.uni-trier.de/~ley/db/conf/scalespace/scalespace1997.html},
   Abstract = {Relations between anisotropic diffusion and robust
             statistics are described in this paper. We show that
             anisotropic diffusion can be seen as a robust estimation
             procedure that estimates a piecewise smooth image from a
             noisy input image. The "edge-stopping" function in the
             anisotropic diffusion equation is closely related to the
             error norm and influence function in the robust estimation
             framework. This connection leads to a new "edge-stopping"
             function based on Tukey's biweight robust estimator that
             preserves sharper boundaries than previous formulations and
             improves the automatic stopping of the diffusion. The robust
             statistical interpretation also provides a means for
             detecting the boundaries (edges) between the piecewise
             smooth regions in the image. Finally, connections between
             robust estimation and line processing provide a framework to
             introduce spatial coherence in anisotropic diffusion
             flows.},
   Doi = {10.1007/3-540-63167-4_27},
   Key = {fds264834}
}
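
A minimal numpy sketch of the connection (ours, not the authors' code): tukey_g is Tukey's biweight edge-stopping function and diffuse_step is one explicit 4-neighbor diffusion update; the 1/2 scale factor and step size lam are conventional choices.

    import numpy as np

    def tukey_g(x, sigma):
        # Tukey's biweight edge-stopping function: identically zero for
        # |x| > sigma, so diffusion halts completely across strong edges.
        g = np.zeros_like(x, dtype=float)
        inside = np.abs(x) <= sigma
        g[inside] = 0.5 * (1.0 - (x[inside] / sigma) ** 2) ** 2
        return g

    def diffuse_step(I, sigma, lam=0.25):
        # One explicit Perona-Malik-style update; the four neighbor
        # differences approximate the local image gradient.
        dN = np.roll(I, -1, axis=0) - I
        dS = np.roll(I, 1, axis=0) - I
        dE = np.roll(I, -1, axis=1) - I
        dW = np.roll(I, 1, axis=1) - I
        return I + lam * (tukey_g(dN, sigma) * dN + tukey_g(dS, sigma) * dS +
                          tukey_g(dE, sigma) * dE + tukey_g(dW, sigma) * dW)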

@article{fds264890,
   Author = {Sapiro, G},
   Title = {From active contours to anisotropic diffusion: Connections
             between basic PDE's in image processing},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {1},
   Pages = {477-480},
   Year = {1996},
   Month = {December},
   Abstract = {In this paper, we present mathematical and qualitative
             relations between a number of partial differential equations
             frequently used in image processing and computer vision. We
             show for example that classical active contours introduced
             for object detection by Terzopoulos and colleagues are
             connected to anisotropic diffusion flows such as those defined by
             Perona and Malik. We also deal with the relation of these
             flows with shock filters and variational approaches for
             image restoration.},
   Key = {fds264890}
}

@article{fds264892,
   Author = {Sapiro, G},
   Title = {Vector (self) snakes: A geometric framework for color,
             texture, and multiscale image segmentation},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {1},
   Pages = {817-820},
   Year = {1996},
   Month = {December},
   Abstract = {A partial-differential-equation (PDE) based geometric
             framework for segmentation of vector-valued images is
             described in this paper. The first component of this
             approach is based on two dimensional geometric active
             contours deforming from their initial position towards
             objects in the image. The boundaries of these objects are
             then obtained as geodesics or minimal weighted distance
             curves in a Riemannian space. The metric in this space is
             given by a definition of edges in vector-valued images,
             incorporating information from all the image components. The
             curve flow corresponding to these active contours holds
             formal existence, uniqueness, stability, and correctness
             results. Then, embedding the deforming curve as the
             level-set of the image, that is, deforming each one of the
             image components level-sets according to these active
             contours, a system of coupled PDE's is obtained. This system
             deforms the image towards uniform regions, obtaining a
             simplified (or segmented) image. The flow is related to a
             number of PDE-based image processing algorithms such as
             anisotropic diffusion and shock filters. The technique is
             applicable to color and texture images, as well as to vector
             data obtained from general image decompositions.},
   Key = {fds264892}
}

@article{fds264893,
   Author = {Giblin, PJ and Sapiro, G},
   Title = {Affine invariant distances, envelopes and symmetry
             sets},
   Journal = {HP Laboratories Technical Report},
   Number = {96-93},
   Pages = {2-30},
   Year = {1996},
   Month = {June},
   Abstract = {This work aims to present and study symmetry sets which are
             affine invariant. Two alternative definitions of affine invariant
             symmetry sets are presented. The first one is based on a
             definition of affine invariant distances. The second
             approach is based on affine bitangent conics. Following the
             formal definitions of the affine symmetry sets, a number of
             their properties are highlighted.},
   Key = {fds264893}
}

@article{fds264891,
   Author = {Ringach, DL and Sapiro, G and Shapley, R},
   Title = {A simple reverse correlation scheme for the identification
             of visual neurons},
   Journal = {Investigative Ophthalmology and Visual Science},
   Volume = {37},
   Number = {3},
   Pages = {S904},
   Year = {1996},
   Month = {February},
   ISSN = {0146-0404},
   Abstract = {Purpose. The standard approach to generalize the white-noise
             technique to neural systems with multiple inputs consists of
             using a spatio-temporal white noise stimulus. A drawback of
             this methodology is that the input space to be explored is
             huge, and only a sparse coverage can be achieved in limited
             time. We propose a new discrete-time reverse correlation
             technique that effectively reduces the dimension of the
             input space, yielding higher signal to noise ratios. This is
             achieved by exploiting a priori knowledge about the spatial
             tuning properties of the neuron. Results. We first select a
             set S of M orthonormal images of size N² pixels. The idea is
             to have M ≪ N² and use previous knowledge about the
             neuron's spatial tuning to select an appropriate input
             space. An input image sequence is generated by selecting, at
             each time, a random element from S. We prove that the
             projection of the receptive field onto the subspace spanned
             by the set S can be estimated based on measurements of the
             cross-correlation between the input image sequence and the
             cell's output. The technique can also be applied to systems
             that can be modeled as a linear receptive field followed by
             a static nonlinearity. Examples are shown where S is a
             subset of the complete two-dimensional discrete Hartley
             basis functions. Conclusions. A simple reverse correlation
             scheme that only requires the generation of a fixed number
             of images can be used to identify quasi-linear visual
             neurons. Prior knowledge of the spatial tuning of the cell
             can be incorporated in the selection of an effective set of
             stimulus images. We are currently applying this technique to
             the analysis of V1 simple cells.},
   Key = {fds264891}
}
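
A toy simulation makes the proposed estimator concrete. The sketch below is ours, not the authors' protocol: a random orthonormal basis stands in for the Hartley subset, a rectified linear model stands in for the neuron, and random stimulus polarity keeps the input zero-mean so that the cross-correlation recovers the projection of the receptive field onto span(S) up to scale.

    import numpy as np

    rng = np.random.default_rng(0)
    M, N2, T = 16, 64, 50000         # subspace size, pixels (N^2), frames

    S = np.linalg.qr(rng.normal(size=(N2, M)))[0]  # orthonormal columns
    rf = rng.normal(size=N2)         # hidden receptive field (toy truth)

    idx = rng.integers(M, size=T)    # random element of S at each frame
    sgn = rng.choice([-1.0, 1.0], T) # random polarity: zero-mean stimulus
    stim = S[:, idx] * sgn           # N2 x T input image sequence
    spikes = rng.poisson(np.maximum(stim.T @ rf, 0.0))  # linear RF + rectifier

    est = stim @ spikes / T          # cross-correlation estimate
    proj = S @ (S.T @ rf)            # true projection onto span(S)
    print(np.corrcoef(est, proj)[0, 1])  # close to 1 for large T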

@article{fds264837,
   Author = {Caselles, V and Kimmel, R and Sapiro, G and Sbert,
             C},
   Title = {Three dimensional object modeling via minimal
             surfaces},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {1064},
   Pages = {97-106},
   Publisher = {SPRINGER},
   Editor = {Buxton, BF and Cipolla, R},
   Year = {1996},
   Month = {January},
   ISBN = {9783540611226},
   url = {http://www.informatik.uni-trier.de/~ley/db/conf/eccv/eccv1996-1.html},
   Abstract = {A novel geometric approach for 3D object segmentation and
             representation is presented. The scheme is based on
             geometric deformable surfaces moving towards the objects to
             be detected. We show that this model is equivalent to the
             computation of surfaces of minimal area, better known as
             ’minimal surfaces,’ in a Riemannian space. This space is
             defined by a metric induced from the 3D image (volumetric
             data) in which the objects are to be detected. The model
             shows the relation between classical deformable surfaces
             obtained via energy minimization, and geometric ones derived
             from curvature based flows. The new approach is stable,
             robust, and automatically handles changes in the surface
             topology during the deformation. Based on an efficient
             numerical algorithm for surface evolution, we present
             examples of object detection in real and synthetic
             images.},
   Doi = {10.1007/bfb0015526},
   Key = {fds264837}
}

@article{fds264884,
   Author = {Weinberger, MJ and Seroussi, G and Sapiro, G},
   Title = {LOCO-I: a low complexity, context-based, lossless image
             compression algorithm},
   Journal = {Data Compression Conference Proceedings},
   Pages = {140-149},
   Publisher = {IEEE Comput. Soc. Press},
   Year = {1996},
   Month = {January},
   url = {http://dx.doi.org/10.1109/dcc.1996.488319},
   Abstract = {LOCO-I (LOw COmplexity LOssless COmpression for Images) is a
             novel lossless compression algorithm for continuous-tone
             images which combines the simplicity of Huffman coding with
             the compression potential of context models, thus 'enjoying
             the best of both worlds.' The algorithm is based on a simple
             fixed context model, which approaches the capability of the
             more complex universal context modeling techniques for
             capturing high-order dependencies. The model is tuned for
             efficient performance in conjunction with a collection of
             (context-conditioned) Huffman codes, which is realized with
             an adaptive, symbol-wise, Golomb-Rice code. LOCO-I attains,
             in one pass, and without recourse to the higher complexity
             arithmetic coders, compression ratios similar or superior to
             those obtained with state-of-the-art schemes based on
             arithmetic coding. In fact, LOCO-I is being considered by
             the ISO committee as a replacement for the current lossless
             standard in low-complexity applications.},
   Doi = {10.1109/dcc.1996.488319},
   Key = {fds264884}
}
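
The Golomb-Rice component mentioned above has a very small core. The sketch below is illustrative only (it is not the LOCO-I/JPEG-LS reference code, and fold, rice_encode, and rice_decode are our names): prediction residuals are interleaved into nonnegative integers and coded as a unary quotient followed by k raw bits.

    def fold(e):
        # Interleave signed residuals into nonnegative integers:
        # 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...
        return 2 * e if e >= 0 else -2 * e - 1

    def rice_encode(n, k):
        # Golomb-Rice codeword: quotient n >> k in unary (q ones plus a
        # terminating zero), then the k low-order bits of n verbatim.
        q = n >> k
        rem = format(n & ((1 << k) - 1), 'b').zfill(k) if k else ''
        return '1' * q + '0' + rem

    def rice_decode(bits, k):
        # Inverse of rice_encode for a single codeword.
        q = bits.index('0')          # length of the unary prefix
        r = int(bits[q + 1:q + 1 + k], 2) if k else 0
        return (q << k) | r

    assert all(rice_decode(rice_encode(n, 3), 3) == n for n in range(256))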

@article{fds264885,
   Author = {Sapiro, G},
   Title = {Vector-valued active contours},
   Journal = {Proceedings of the IEEE Computer Society Conference on
             Computer Vision and Pattern Recognition},
   Pages = {680-685},
   Publisher = {IEEE},
   Year = {1996},
   Month = {January},
   url = {http://dx.doi.org/10.1109/cvpr.1996.517146},
   Abstract = {A framework for object segmentation in vector-valued images
             is presented in this paper. The first scheme proposed is
             based on geometric active contours moving towards the
             objects to be detected in the vector-valued image. Object
             boundaries are obtained as geodesics or minimal weighted
             distance curves in a Riemannian space. The metric in this
             space is given by a definition of edges in vector-valued
             images. The curve flow corresponding to the proposed active
             contours holds formal existence, uniqueness, stability, and
             correctness results. The technique is applicable, for
             example, to color and texture images. The scheme
             automatically handles changes in the deforming curve
             topology. We conclude the paper presenting an extension of
             the color active contours which leads to a possible image
             flow for vector-valued image segmentation. The algorithm is
             based on moving each one of the image level-sets according
             to the proposed color active contours. This extension also
             shows the relation of the color geodesic active contours
             with a number of partial-differential-equation-based image
             processing algorithms such as anisotropic diffusion and shock
             filters.},
   Doi = {10.1109/cvpr.1996.517146},
   Key = {fds264885}
}

@article{fds264886,
   Author = {Sapiro, G and Ringach, DL},
   Title = {Anisotropic diffusion of color images},
   Journal = {Proceedings of SPIE - The International Society for Optical
             Engineering},
   Volume = {2657},
   Pages = {471-482},
   Publisher = {SPIE},
   Year = {1996},
   Month = {January},
   ISSN = {0277-786X},
   url = {http://dx.doi.org/10.1117/12.238745},
   Abstract = {A new approach for anisotropic diffusion processing of color
             images is proposed. The main idea of the algorithm is to
             facilitate diffusion of the image in the direction parallel
             to color edges. The direction of maximal and minimal color
             change at each point is computed using the first fundamental
             form of the image in (L*a*b*) color space. The image Φ
             evolves according to the anisotropic diffusion flow
             ∂Φ/∂t = g(λ+, λ−) ∂²Φ/∂ξ², where ξ is the
             direction of minimal color change. The diffusion
             coefficient g(λ+, λ−) is a function of the eigenvalues
             of the first fundamental form, which represent the maximal
             and minimal rates of color change. Examples for real color
             images are presented.},
   Doi = {10.1117/12.238745},
   Key = {fds264886}
}
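
The eigenstructure computation that the abstract relies on (the first fundamental form of a multichannel image) fits in a few lines. A numpy sketch under our own naming; the paper additionally works in CIE-L*a*b* and feeds these quantities into the diffusion flow above:

    import numpy as np

    def color_structure(I):
        # I: H x W x C image. Returns the eigenvalues of the first
        # fundamental form (maximal and minimal squared rates of color
        # change) and the angle of the minimal-change direction xi.
        Iy, Ix = np.gradient(I, axis=0), np.gradient(I, axis=1)
        g11 = (Ix * Ix).sum(axis=-1)
        g12 = (Ix * Iy).sum(axis=-1)
        g22 = (Iy * Iy).sum(axis=-1)
        tr = g11 + g22
        disc = np.sqrt((g11 - g22) ** 2 + 4 * g12 ** 2)
        lam_plus, lam_minus = (tr + disc) / 2, (tr - disc) / 2
        theta = 0.5 * np.arctan2(2 * g12, g11 - g22)  # max-change direction
        return lam_plus, lam_minus, theta + np.pi / 2  # xi is orthogonal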

@article{fds264887,
   Author = {Olver, PJ and Sapiro, G and Tannenbaum, A},
   Title = {Affine invariant detection: edges, active contours, and
             segments},
   Journal = {Proceedings of the IEEE Computer Society Conference on
             Computer Vision and Pattern Recognition},
   Pages = {520-525},
   Publisher = {IEEE},
   Year = {1996},
   Month = {January},
   url = {http://dx.doi.org/10.1109/cvpr.1996.517121},
   Abstract = {In this paper we undertake a systematic investigation of
             affine invariant object detection. Edge detection is first
             presented from the point of view of the affine invariant
             scale-space obtained by curvature based motion of the image
             level-sets. In this case, affine invariant edges are
             obtained as a weighted difference of images at different
             scales. We then introduce the affine gradient as the
             simplest possible affine invariant differential function
             which has the same qualitative behavior as the Euclidean
             gradient magnitude. These edge detectors are the basis both
             to extend the affine invariant scale-space to a complete
             affine flow for image denoising and simplification, and to
             define affine invariant active contours for object detection
             and edge integration. The active contours are obtained as a
             gradient flow in a conformally Euclidean space defined by
             the image on which the object is to be detected. That is, we
             show that objects can be segmented in an affine invariant
             manner by computing a path of minimal weighted affine
             distance, the weight being given by functions of the affine
             edge detectors. The geodesic path is computed via an
             algorithm which allows the simultaneous detection of any number
             of objects, independently of the initial curve
             topology.},
   Doi = {10.1109/cvpr.1996.517121},
   Key = {fds264887}
}

@article{fds264888,
   Author = {Malladi, R and Kimmel, R and Adalsteinsson, D and Sapiro, G and Caselles, V and Sethian, JA},
   Title = {Geometric approach to segmentation and analysis of 3D
             medical images},
   Journal = {Proceedings of the Workshop on Mathematical Methods in
             Biomedical Image Analysis},
   Pages = {244-252},
   Publisher = {IEEE},
   Year = {1996},
   Month = {January},
   url = {http://dx.doi.org/10.1109/mmbia.1996.534076},
   Abstract = {A geometric scheme for detecting, representing, and
             measuring 3D medical data is presented. The technique is
             based on deforming 3D surfaces, represented via level-sets,
             towards the medical objects, according to intrinsic
             geometric measures of the data. The 3D medical object is
             represented as a (weighted) minimal surface in a Riemannian
             space whose metric is induced from the image. This minimal
             surface is computed using the level-set methodology for
             propagating interfaces, combined with a narrow band
             technique which allows fast implementation. This computation
             technique automatically handles topological changes.
             Measurements like volume and area are performed on the
             surface, exploiting the representation and the high accuracy
             intrinsic to the algorithm.},
   Doi = {10.1109/mmbia.1996.534076},
   Key = {fds264888}
}

@article{fds264889,
   Author = {Sapiro, G and Ringach, DL},
   Title = {Anisotropic diffusion of multivalued images with
             applications to color filtering.},
   Journal = {IEEE Transactions on Image Processing},
   Volume = {5},
   Number = {11},
   Pages = {1582-1586},
   Year = {1996},
   Month = {January},
   ISSN = {1057-7149},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18290076},
   Abstract = {A general framework for anisotropic diffusion of multivalued
             images is presented. We propose an evolution equation where,
             at each point in time, the directions and magnitudes of the
             maximal and minimal rate of change in the vector-image are
             first evaluated. These are given by eigenvectors and
             eigenvalues of the first fundamental form in the given image
             metric. Then, the image diffuses via a system of coupled
             differential equations in the direction of minimal change.
             The diffusion "strength" is controlled by a function that
             measures the degree of dissimilarity between the
             eigenvalues. We apply the proposed framework to the
             filtering of color images represented in CIE-L*a*b*
             space.},
   Doi = {10.1109/83.541429},
   Key = {fds264889}
}

@article{fds264751,
   Author = {Ringach, DL and Carandini, M and Sapiro, G and Shapley,
             R},
   Title = {Cortical circuitry revealed by reverse correlation in the
             orientation domain},
   Journal = {PERCEPTION},
   Volume = {25},
   Pages = {31-31},
   Year = {1996},
   ISSN = {0301-0066},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000207910300086&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264751}
}

@article{fds264757,
   Author = {Sapiro, G},
   Title = {Mircea Eliade and the amnesia of history},
   Journal = {ACTES DE LA RECHERCHE EN SCIENCES SOCIALES},
   Number = {114},
   Pages = {5-5},
   Year = {1996},
   ISSN = {0335-5322},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1996VG82900005&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264757}
}

@article{fds264759,
   Author = {Sapiro, G},
   Title = {For reasons of literature - French literature during the
             occupation 1940-1944},
   Journal = {ACTES DE LA RECHERCHE EN SCIENCES SOCIALES},
   Number = {111-12},
   Pages = {3-&},
   Year = {1996},
   ISSN = {0335-5322},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1996UF63300001&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264759}
}

@article{fds264764,
   Author = {Sapiro, G},
   Title = {Intellectuals in 19th-century Europe - Comparative history -
             French - Charle,C},
   Journal = {ACTES DE LA RECHERCHE EN SCIENCES SOCIALES},
   Number = {115},
   Pages = {107-107},
   Year = {1996},
   ISSN = {0335-5322},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1996WB21100019&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264764}
}

@article{fds264766,
   Author = {Ringach, DL and Sapiro, G and Shapley, R},
   Title = {A simple reverse correlation scheme for the identification
             of visual neurons},
   Journal = {INVESTIGATIVE OPHTHALMOLOGY & VISUAL SCIENCE},
   Volume = {37},
   Number = {3},
   Pages = {4178-4178},
   Year = {1996},
   ISSN = {0146-0404},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1996TX39704172&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264766}
}

@article{fds264770,
   Author = {Sapiro, G},
   Title = {Vichy and femininity - Political sociology of the order of
             bodies - French - Muel-Dreyfus, F},
   Journal = {ACTES DE LA RECHERCHE EN SCIENCES SOCIALES},
   Number = {113},
   Pages = {103-104},
   Year = {1996},
   ISSN = {0335-5322},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1996UQ65300012&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264770}
}

@article{fds264791,
   Author = {Sapiro, G},
   Title = {Dans les soulevements - French - Vargaftig,B},
   Journal = {ACTES DE LA RECHERCHE EN SCIENCES SOCIALES},
   Number = {114},
   Pages = {13-13},
   Year = {1996},
   ISSN = {0335-5322},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1996VG82900015&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264791}
}

@article{fds264800,
   Author = {Sapiro, G},
   Title = {Boarded-up windows - French - Vona,A},
   Journal = {ACTES DE LA RECHERCHE EN SCIENCES SOCIALES},
   Number = {114},
   Pages = {3-3},
   Year = {1996},
   ISSN = {0335-5322},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1996VG82900003&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264800}
}

@article{fds264804,
   Author = {Sapiro, G},
   Title = {Economics and humanism - From public utopia to struggle for
             the Third World 1941-1996 - French - Pelletier,D},
   Journal = {ACTES DE LA RECHERCHE EN SCIENCES SOCIALES},
   Number = {115},
   Pages = {105-106},
   Year = {1996},
   ISSN = {0335-5322},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1996WB21100016&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264804}
}

@article{fds264821,
   Author = {Sapiro, G},
   Title = {Salvation through literature and literature of salvation -
             Two different paths of the Catholic novelists: Francois
             Mauriac and Henry Bordeaux},
   Journal = {ACTES DE LA RECHERCHE EN SCIENCES SOCIALES},
   Number = {111-12},
   Pages = {36-&},
   Year = {1996},
   ISSN = {0335-5322},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1996UF63300002&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264821}
}

@article{fds264880,
   Author = {Sapiro, G and Caselles, V},
   Title = {Histogram modification via partial differential
             equations},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {3},
   Pages = {632-635},
   Year = {1995},
   Month = {December},
   Abstract = {An algorithm for histogram modification via image evolution
             equations is first presented in this paper. We show that the
             image histogram can be modified to achieve any given
             distribution as the steady state solution of this partial
             differential equation. We then prove that this equation
             corresponds to a gradient descent flow of a variational
             problem. That is, the proposed PDE is solving an energy
             minimization problem. This gives a new interpretation to
             histogram modification and contrast enhancement in general.
             This interpretation is completely formulated in the image
             domain, in contrast with classical techniques for histogram
             modification which are formulated in a probabilistic domain.
             From this, new algorithms for contrast enhancement, which
             include for example image modeling, can be derived. Based on
             the energy formulation and its corresponding PDE, we show
             that the proposed histogram modification algorithm can be
             combined with denoising schemes. This allows simultaneous
             contrast enhancement and denoising, avoiding
             common noise sharpening effects in classical algorithms. The
             approach is extended to local contrast enhancement as well.
             Theoretical results regarding the existence of solutions to
             the proposed equations are presented.},
   Key = {fds264880}
}
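
For the equalization case, the steady state the abstract refers to can be stated concretely (a standard characterization, in our notation: u is the image on domain Ω with gray values in [0, M]):

\[ \operatorname{Area}\{\, y \in \Omega : u(y) \le u(x) \,\} = \frac{|\Omega|}{M}\, u(x) \quad \text{for every } x \in \Omega, \]

i.e., the cumulative distribution of gray values is linear, which is exactly a uniform histogram. General target distributions replace the linear right-hand side.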

@article{fds264883,
   Author = {Sapiro, G},
   Title = {Geometric partial differential equations in image analysis:
             past, present, and future},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {3},
   Pages = {1-4},
   Year = {1995},
   Month = {December},
   Abstract = {In this paper I briefly discuss the main characteristics of
             the use of partial differential equations and curve/surface
             evolution theory in computer vision and image processing. I
             will describe the approach and its main advantages, together
             with a number of examples.},
   Key = {fds264883}
}

@article{fds264789,
   Author = {Sapiro, G and Caselles, V},
   Title = {An image evolution approach for contrast
             enhancement},
   Journal = {Proceedings of SPIE - The International Society for Optical
             Engineering},
   Volume = {2567},
   Pages = {19-30},
   Publisher = {SPIE},
   Year = {1995},
   Month = {September},
   ISBN = {0-8194-1926-5},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1995BE16B00003&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Abstract = {An algorithm for histogram modification via image evolution
             equations is first presented in this paper. We show that the
             image histogram can be modified to achieve any given
             distribution as the steady state solution of this partial
             differential equation. We then prove that this equation
             corresponds to a gradient descent flow of a variational
             problem. That is, the proposed PDE is solving an energy
             minimization problem. This gives a new interpretation to
             histogram modification and contrast enhancement in general.
             This interpretation is completely formulated in the image
             domain, in contrast with classical techniques for histogram
             modification which are formulated in a probabilistic domain.
             From this, new algorithms for contrast enhancement, which
             include for example image modeling, can be derived. Based on
             the energy formulation and its corresponding PDE, we show
             that the proposed histogram modification algorithm can be
             combined with denoising schemes. This allows simultaneous
             contrast enhancement and denoising, avoiding
             common noise sharpening effects in classical algorithms. The
             approach is extended to local contrast enhancement as well.
             Theoretical results regarding the existence of solutions of
             the proposed equations are presented.},
   Doi = {10.1117/12.218484},
   Key = {fds264789}
}

@article{fds264793,
   Author = {Sapiro, G and Kimmel, R and Caselles, V},
   Title = {Object detection and measurements in medical images via
             geodesic deformable contours},
   Journal = {Proceedings of SPIE - The International Society for Optical
             Engineering},
   Volume = {2573},
   Pages = {366-378},
   Publisher = {SPIE},
   Year = {1995},
   Month = {August},
   ISBN = {0-8194-1932-X},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1995BD88U00035&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Abstract = {A novel scheme for performing detection and measurements in
             medical images is presented. The technique is based on
             active contours evolving in time according to intrinsic
             geometric measures of the image. The evolving contours
             naturally split and merge, allowing the simultaneous
             detection of several objects and both interior and exterior
             boundaries. The proposed approach is based on the relation
             between active contours and the computation of geodesics or
             minimal distance curves. The minimal distance curve lies in
             a Riemannian space whose metric is defined by the image
             content. Measurements are performed after the object is
             detected. Due to the high accuracy achieved by the proposed
             geodesic approach, it is natural to use it to compute area
             or length of the detected object, which are of great value
             for diagnosis. Open curves with fixed boundaries are computed
             as well. This addition to the deformable model adds
             flexibility, allowing the user to choose guiding points in
             the image or to select regions for measurements.
             Experimental results of applying the scheme to real medical
             images demonstrate its potential. The results may be
             extended to 3D object segmentation as well.},
   Doi = {10.1117/12.216429},
   Key = {fds264793}
}

@article{fds264810,
   Author = {Sapiro, G and Caselles, V},
   Title = {Simultaneous contrast improvement and denoising via
             diffusion related equations},
   Journal = {Proceedings of SPIE - The International Society for Optical
             Engineering},
   Volume = {2573},
   Pages = {342-353},
   Publisher = {SPIE},
   Year = {1995},
   Month = {August},
   ISBN = {0-8194-1932-X},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1995BD88U00033&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Abstract = {The explicit use of partial differential equations (PDE's)
             in image processing became a major topic of study in
             recent years. In this work we present an algorithm for
             histogram modification via PDE's. We show that the histogram
             can be modified to achieve any given distribution. The
             modification can be performed while simultaneously reducing
             noise. This avoids the noise sharpening effect in classical
             algorithms. The approach is extended to local contrast
             enhancement as well. A variational interpretation of the
             flow is presented and theoretical results on the existence
             of solutions are given.},
   Doi = {10.1117/12.216427},
   Key = {fds264810}
}

@article{fds264787,
   Author = {Sapiro, G and Steel, J},
   Title = {Littératures de l'ombre},
   Journal = {Le Mouvement social},
   Number = {171},
   Pages = {109-109},
   Publisher = {JSTOR},
   Year = {1995},
   Month = {April},
   ISSN = {0027-2671},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1995RV12500013&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.2307/3779547},
   Key = {fds264787}
}

@article{fds264881,
   Author = {Sapiro, G and Bruckstein, AM},
   Title = {The ubiquitous ellipse},
   Journal = {Acta Applicandae Mathematicae},
   Volume = {38},
   Number = {2},
   Pages = {149-161},
   Publisher = {Springer Nature},
   Year = {1995},
   Month = {February},
   ISSN = {0167-8019},
   url = {http://dx.doi.org/10.1007/BF00992844},
   Abstract = {We discuss three different affine invariant evolution
             processes for smoothing planar curves. The first one is
             derived from a geometric heat-type flow, both the initial
             and the smoothed curves being differentiable. The second
             smoothing process is obtained from a discretization of this
             affine heat equation. In this case, the curves are
             represented by planar polygons. The third process is based
             on B-spline approximations. For this process, the initial
             curve is a planar polygon, and the smoothed curves are
             differentiable and even analytic. We show that, in the
             limit, all three affine invariant smoothing processes
             collapse any initial curve into an elliptic point. © 1995
             Kluwer Academic Publishers.},
   Doi = {10.1007/BF00992844},
   Key = {fds264881}
}

@article{fds264877,
   Author = {Caselles, V and Kimmel, R and Sapiro, G},
   Title = {Geodesic active contours},
   Journal = {IEEE International Conference on Computer
             Vision},
   Pages = {694-699},
   Publisher = {IEEE Comput. Soc. Press},
   Year = {1995},
   Month = {January},
   url = {http://dx.doi.org/10.1109/iccv.1995.466871},
   Abstract = {A novel scheme for the detection of object boundaries is
             presented. The technique is based on active contours
             deforming according to intrinsic geometric measures of the
             image. The evolving contours naturally split and merge,
             allowing the simultaneous detection of several objects and
             both interior and exterior boundaries. The proposed approach
             is based on the relation between active contours and the
             computation of geodesics or minimal distance curves. The
             minimal distance curve lies in a Riemannian space whose
             metric is defined by the image content. This geodesic
             approach to object segmentation allows one to connect
             classical 'snakes' based on energy minimization and
             geometric active contours based on the theory of curve
             evolution. Previous models of geometric active contours
             are improved, as shown by a number of examples. Formal
             results concerning
             existence, uniqueness, stability, and correctness of the
             evolution are presented as well.},
   Doi = {10.1109/iccv.1995.466871},
   Key = {fds264877}
}

@article{fds264878,
   Author = {Tannenbaum, A and Sapiro, G},
   Title = {Area and Length Preserving Geometric Invariant
             Scale-Spaces},
   Journal = {IEEE Transactions on Pattern Analysis and Machine
             Intelligence},
   Volume = {17},
   Number = {1},
   Pages = {67-72},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1995},
   Month = {January},
   url = {http://dx.doi.org/10.1109/34.368150},
   Abstract = {In this paper, area preserving multi-scale representations
             of planar curves are described. This allows smoothing
             without shrinkage while at the same time preserving all the
             scale-space properties. The representations are obtained by
             deforming the curve via geometric heat flows while
             simultaneously magnifying the plane by a homothety which
             keeps the enclosed area constant. When the Euclidean
             geometric heat flow is used, the resulting representation is
             Euclidean invariant, and similarly it is affine invariant
             when the affine one is used. The flows are geometrically
             intrinsic to the curve, and exactly satisfy all the basic
             requirements of scale-space representations. In the case of
             the Euclidean heat flow, it is completely local as well. The
             same approach is used to define length preserving geometric
             flows. A similarity (scale) invariant geometric heat flow is
             studied as well in this work. © 1995 IEEE},
   Doi = {10.1109/34.368150},
   Key = {fds264878}
}
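
To give the flavor of the construction: for an embedded curve evolving under the Euclidean geometric heat flow the enclosed area decreases at the constant rate 2π, so coupling the flow with a homothety of strength π/A keeps the area fixed. In the usual notation (κ curvature, N inward unit normal; the sign conventions here are ours):

\[ \frac{\partial C}{\partial t} = \kappa N \;\Rightarrow\; \frac{dA}{dt} = -2\pi, \qquad \frac{\partial C}{\partial t} = \kappa N + \frac{\pi}{A}\, C \;\Rightarrow\; \frac{dA}{dt} = 0. \]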

@article{fds264882,
   Author = {Kimmel, R and Sapiro, G},
   Title = {Shortening three-dimensional curves via two-dimensional
             flows},
   Journal = {Computers and Mathematics with Applications},
   Volume = {29},
   Number = {3},
   Pages = {49-62},
   Publisher = {Elsevier BV},
   Year = {1995},
   Month = {January},
   ISSN = {0898-1221},
   url = {http://dx.doi.org/10.1016/0898-1221(94)00228-D},
   Abstract = {In this paper, a curve evolution approach for the
             computation of geodesic curves on 3D surfaces is presented.
             The algorithm is based on deforming, via the curve
             shortening flow, an arbitrary initial curve ending at two
             given surface points. The 3D curve shortening flow is first
             transformed into an equivalent 2D one. This 2D flow is
             implemented, using an efficient numerical algorithm for
             curve evolution with fixed end points. ©
             1995.},
   Doi = {10.1016/0898-1221(94)00228-D},
   Key = {fds264882}
}

@article{fds264765,
   Author = {POLLICK, FE and SAPIRO, G},
   Title = {CONSTANT AFFINE VELOCITY AND THE GENERATION AND PERCEPTION
             OF UNIFORM PLANAR MOTION},
   Journal = {INVESTIGATIVE OPHTHALMOLOGY & VISUAL SCIENCE},
   Volume = {36},
   Number = {4},
   Pages = {S360-S360},
   Year = {1995},
   ISSN = {0146-0404},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1995QM91501687&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264765}
}

@article{fds264879,
   Author = {Bruckstein, AM and Sapiro, G and Shaked, D},
   Title = {Evolutions of planar polygons},
   Journal = {International Journal of Pattern Recognition and Artificial
             Intelligence},
   Volume = {9},
   Number = {6},
   Pages = {991-1014},
   Publisher = {World Scientific Pub Co Pte Lt},
   Year = {1995},
   url = {http://dx.doi.org/10.1142/S0218001495000407},
   Abstract = {Evolutions of closed planar polygons are studied in this
             work. In the first part of the paper, the general theory of
             linear polygon evolutions is presented, and two specific
             problems are analyzed. The first one is a polygonal analog
             of a novel affine-invariant differential curve evolution,
             for which the convergence of planar curves to ellipses was
             proved. In the polygon case, convergence to polygonal
             approximation of ellipses, polygonal ellipses, is proven.
             The second one is related to cyclic pursuit problems, and
             convergence, either to polygonal ellipses or to polygonal
             circles, is proven. In the second part, two possible
             polygonal analogues of the well-known Euclidean curve
             shortening flow are presented. The models follow from
             geometric considerations. Experimental results show that an
             arbitrary initial polygon converges to either regular or
             irregular polygonal approximations of circles when evolving
             according to the proposed Euclidean flows.},
   Doi = {10.1142/S0218001495000407},
   Key = {fds264879}
}

@article{fds264797,
   Author = {Sapiro, G},
   Title = {Geometric invariant signatures and flows: Classification and
             applications in image analysis},
   Journal = {Proceedings of SPIE - The International Society for Optical
             Engineering},
   Volume = {2277},
   Pages = {275-287},
   Publisher = {SPIE},
   Year = {1994},
   Month = {October},
   ISBN = {0-8194-1601-0},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1994BB97C00027&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Abstract = {Based on modern invariant theory and symmetry groups, a
             high-level way of defining invariant geometric flows for a
             given Lie group is described in this work. We then analyze
             in more detail different subgroups of the projective
             group, which are of special interest for computer vision.
             We classify the corresponding invariant flows and show
             that the geometric heat flow is the simplest possible one.
             Results on invariant geometric flows of surfaces are
             presented in this paper as well. We then show how the
             planar curve flow obtained for the affine group can be
             used for geometric smoothing of planar shapes and
             edge-preserving enhancement of MRI. We conclude the paper
             with the presentation of an affine invariant geometric
             edge detector obtained from the classification of affine
             differential invariants.},
   Doi = {10.1117/12.191890},
   Key = {fds264797}
}

@article{fds264845,
   Author = {Sapiro, G and Tannenbaum, A},
   Title = {Area and length preserving geometric invariant
             scale-spaces},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {801 LNCS},
   Pages = {449-458},
   Publisher = {SPRINGER},
   Editor = {Eklundh, J-O},
   Year = {1994},
   Month = {January},
   ISBN = {9783540579571},
   url = {http://www.informatik.uni-trier.de/~ley/db/conf/eccv/eccv1994-2.html},
   Abstract = {In this paper, area preserving geometric multi-scale
             representations of planar curves are described. This allows
             geometric smoothing without shrinkage at the same time
             preserving all the scale-space properties. The
             representations are obtained deforming the curve via
             invariant geometric heat flows while simultaneously
             magnifying the plane by a homothety which keeps the enclosed
             area constant. The flows are geometrically intrinsic to the
             curve, and exactly satisfy all the basic requirements of
             scale-space representations. In the case of the Euclidean
             heat flow for example, it is completely local as well. The
             same approach is used to define length preserving geometric
             flows. The geometric scale-spaces are implemented using an
             efficient numerical algorithm.},
   Doi = {10.1007/bfb0028376},
   Key = {fds264845}
}

@article{fds264850,
   Author = {Sapiro, G and Tannenbaum, A and You, YL and Kaveh,
             M},
   Title = {Experiments on geometric image enhancement},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Volume = {2},
   Pages = {472-476},
   Publisher = {IEEE Comput. Soc. Press},
   Year = {1994},
   Month = {January},
   ISBN = {0818669527},
   url = {http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=3232},
   Abstract = {In this paper we experiment with geometric algorithms for
             image smoothing. Examples are given for MRI and ATR data. We
             emphasize experiments with the affine invariant geometric
             smoother or affine heat equation, originally developed for
             binary shape smoothing, and found to be efficient for
             gray-level images as well. Efficient numerical
             implementations of these flows give anisotropic diffusion
             processes which preserve edges.},
   Doi = {10.1109/ICIP.1994.413615},
   Key = {fds264850}
}

@article{fds264876,
   Author = {Sapiro, G and Malah, D},
   Title = {Morphological image coding based on a geometric sampling
             theorem and a modified skeleton representation},
   Journal = {Journal of Visual Communication and Image
             Representation},
   Volume = {5},
   Number = {1},
   Pages = {29-40},
   Publisher = {Elsevier BV},
   Year = {1994},
   Month = {January},
   ISSN = {1047-3203},
   url = {http://dx.doi.org/10.1006/jvci.1994.1003},
   Abstract = {A new approach for gray-level image coding using binary
             morphological operations on the image bit-planes is
             presented. This approach is based on a Geometric Sampling
             Theorem (GST), and on a modified morphological skeleton. The
             theorem, which is proved in this paper, states conditions
             for the reconstruction of the boundary of a continuous two
             level image from a unique subset of points of its skeleton
             representation. This set of points, referred to as essential
             points, is found to play an important role in the skeleton
             representation of discrete binary images as well. The
             modified morphological skeleton (MMS) uses a structuring
             element whose size increases exponentially. The computational
             advantage of this representation was previously reported. A
             new approach to its development is presented here, and its
             advantage in image coding is demonstrated. The coding scheme
             consists of the following steps: First, the image is
             preprocessed by an error-diffusion technique in order to
             reduce the number of bit-planes from 8 to 4 without
             significant quality degradation. The pixel values are
             subsequently converted to Gray-code. The bit-planes are
             represented by the MMS. Redundancy in this representation is
             reduced using an algorithm motivated by the GST. These
             reduced modified morphological skeletons are coded with an
             entropy coding scheme particularly devised for efficient
             skeleton coding. The possibility of the introduction of
             geometric errors to reduce the bit-rate is also discussed.
             Compression ratios of up to 11:1 were obtained for satellite
             images. © 1994 Academic Press. All rights
             reserved.},
   Doi = {10.1006/jvci.1994.1003},
   Key = {fds264876}
}
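
For orientation, the classical skeleton decomposition that the modified representation builds on (Lantuéjoul's formula) is short to implement. A scipy sketch under our own naming; the paper's MMS differs in using exponentially growing structuring elements and in the GST-motivated redundancy reduction:

    import numpy as np
    from scipy import ndimage

    def morphological_skeleton(X, max_n=64):
        # Skeleton subsets S_n = (X eroded n times) minus its opening;
        # X can be rebuilt exactly by dilating each S_n back n times.
        B = ndimage.generate_binary_structure(2, 1)  # 4-connected cross
        subsets, eroded = [], X.astype(bool)
        for n in range(max_n):
            if not eroded.any():
                break
            opened = ndimage.binary_opening(eroded, structure=B)
            subsets.append(eroded & ~opened)
            eroded = ndimage.binary_erosion(eroded, structure=B)
        return subsets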

@article{SAPIRO1994,
   Author = {Sapiro, G and Tannenbaum, A},
   Title = {On affine plane curve evolution},
   Journal = {Journal of Functional Analysis},
   Volume = {119},
   Number = {1},
   Pages = {79-120},
   Publisher = {Elsevier BV},
   Year = {1994},
   Month = {January},
   ISSN = {0022-1236},
   url = {http://dx.doi.org/10.1006/jfan.1994.1004},
   Abstract = {An affine invariant curve evolution process is presented in
             this work. The evolution studied is the affine analogue of
             the Euclidean Curve Shortening flow. Evolution equations,
             for both affine and Euclidean invariants, are developed. An
             affine version of the classical (Euclidean) isoperimetric
             inequality is proved. This inequality is used to show that
             in the case of affine evolution of convex plane curves, the
             affine isoperimetric ratio is a non-decreasing function of
             time. Convergence of this affine isoperimetric ratio to the
             ellipse's value (8π²), as well as convergence, in the
             Hausdorff metric, of the evolving curve to an ellipse, is
             also proved. © 1994 Academic Press Inc.},
   Doi = {10.1006/jfan.1994.1004},
   Key = {SAPIRO1994}
}
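
The inequality in question relates the affine perimeter L (total affine arc length) of a convex curve to its enclosed area A; equality holds exactly for ellipses, which is why the ratio below converges to 8π² under the evolution:

\[ L^3 \le 8\pi^2 A, \qquad \frac{L^3}{A} \nearrow 8\pi^2. \]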

@article{fds264749,
   Author = {SAPIRO, G and BRUCKSTEIN, AM},
   Title = {The ubiquitous ellipse},
   Journal = {CURVES AND SURFACES IN GEOMETRIC DESIGN},
   Pages = {409-418},
   Year = {1994},
   ISBN = {1-56881-039-3},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1994BD37S00050&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264749}
}

@article{fds264820,
   Author = {OLVER, PJ and SAPIRO, G and TANNENBAUM, A},
   Title = {CLASSIFICATION AND UNIQUENESS OF INVARIANT GEOMETRIC
             FLOWS},
   Journal = {COMPTES RENDUS DE L ACADEMIE DES SCIENCES SERIE
             I-MATHEMATIQUE},
   Volume = {319},
   Number = {4},
   Pages = {339-344},
   Year = {1994},
   ISSN = {0764-4442},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1994PD87100006&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264820}
}

@article{fds264875,
   Author = {Sapiro, G and Tannenbaum, A},
   Title = {Affine invariant scale-space},
   Journal = {International Journal of Computer Vision},
   Volume = {11},
   Number = {1},
   Pages = {25-44},
   Publisher = {Springer Nature},
   Year = {1993},
   Month = {August},
   ISSN = {0920-5691},
   url = {http://dx.doi.org/10.1007/BF01420591},
   Abstract = {A new affine invariant scale-space for planar curves is
             presented in this work. The scale-space is obtained from the
             solution of a novel nonlinear curve evolution equation which
             admits affine invariant solutions. This flow was proved to
             be the affine analogue of the well-known Euclidean
             shortening flow. The evolution also satisfies properties
             such as causality, which makes it useful in defining a
             scale-space. Using an efficient numerical algorithm for
             curve evolution, this continuous affine flow is implemented,
             and examples are presented. The affine-invariant progressive
             smoothing property of the evolution equation is demonstrated
             as well. © 1993 Kluwer Academic Publishers.},
   Doi = {10.1007/BF01420591},
   Key = {fds264875}
}
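
The evolution equation underlying this scale-space is the affine analogue of the Euclidean curve shortening flow; in the standard notation (κ curvature, N inward unit normal):

\[ \frac{\partial C}{\partial t} = \kappa^{1/3} N \quad \text{(affine)}, \qquad \frac{\partial C}{\partial t} = \kappa N \quad \text{(Euclidean)}. \]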

@article{fds264873,
   Author = {Sapiro, G and Kimmel, R and Shaked, D and Kimia, BB and Bruckstein,
             AM},
   Title = {Implementing continuous-scale morphology via curve
             evolution},
   Journal = {Pattern Recognition},
   Volume = {26},
   Number = {9},
   Pages = {1363-1372},
   Publisher = {Elsevier BV},
   Year = {1993},
   Month = {January},
   ISSN = {0031-3203},
   url = {http://dx.doi.org/10.1016/0031-3203(93)90142-J},
   Abstract = {A new approach to digital implementation of continuous-scale
             mathematical morphology is presented. The approach is based
             on discretization of evolution equations associated with
             continuous multiscale morphological operations. Those
             equations, and their corresponding numerical implementation,
             can be derived either directly from mathematical morphology
             definitions or from curve evolution theory. The advantages
             of the proposed approach over the classical discrete
             morphology are demonstrated. © 1993.},
   Doi = {10.1016/0031-3203(93)90142-J},
   Key = {fds264873}
}
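
The evolution equations referred to here reduce, for a disc structuring element of radius t, to the standard dilation and erosion PDEs for a gray-level image u (a well-known special case; other structuring elements change the speed function):

\[ \frac{\partial u}{\partial t} = +\|\nabla u\| \ \text{(dilation)}, \qquad \frac{\partial u}{\partial t} = -\|\nabla u\| \ \text{(erosion)}. \]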

@article{fds264874,
   Author = {Sapiro, G and Tannenbaum, AR},
   Title = {Formulating invariant heat-type curve flows},
   Journal = {Proceedings of SPIE - The International Society for Optical
             Engineering},
   Volume = {2031},
   Pages = {234-245},
   Publisher = {SPIE},
   Year = {1993},
   Month = {January},
   url = {http://dx.doi.org/10.1117/12.146629},
   Abstract = {We describe a geometric method for formulating planar curve
             evolution equations which are invariant under a certain
             transformation group. The approach is based on concepts from
             the classical theory of differential invariants. The flows
             we obtain are geometric analogues of the classical heat
             equation, and can be used to define invariant scale-spaces.
             We give a `high-level' general procedure for the
             construction of these flows. Examples are presented for
             viewing transformations.},
   Doi = {10.1117/12.146629},
   Key = {fds264874}
}

@article{fds264756,
   Author = {SAPIRO, G and TANNENBAUM, A},
   Title = {On Invariant Curve Evolution and Image Analysis},
   Journal = {INDIANA UNIVERSITY MATHEMATICS JOURNAL},
   Volume = {42},
   Number = {3},
   Pages = {985-1009},
   Year = {1993},
   ISSN = {0022-2518},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1993MU69400017&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Doi = {10.1512/iumj.1993.42.42046},
   Key = {fds264756}
}

@article{fds264799,
   Author = {SAPIRO, G and TANNENBAUM, A},
   Title = {IMAGE SMOOTHING BASED ON AN AFFINE INVARIANT CURVE
             FLOW},
   Journal = {PROCEEDINGS OF THE TWENTY-SEVENTH ANNUAL CONFERENCE ON
             INFORMATION SCIENCES AND SYSTEMS},
   Pages = {196-201},
   Year = {1993},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1993BB64Q00041&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264799}
}

@article{fds264814,
   Author = {SAPIRO, G},
   Title = {THE ACADEMIE-FRANCAISE AND THE ACADEMIE-GONCOURT DURING THE
             1940S - FUNCTION AND OPERATION OF LITERARY INSTITUTIONS IN A
             PERIOD OF NATIONAL CRISIS},
   Journal = {TEXTE-REVUE DE CRITIQUE ET DE THEORIE LITTERAIRE},
   Number = {12},
   Pages = {151-196},
   Year = {1992},
   ISSN = {0715-8920},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1992MR14900011&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264814}
}

@article{fds344655,
   Author = {Sapiro, G and Malah, D},
   Title = {Morphological image coding via bit-plane decomposition and a
             new skeleton representation},
   Journal = {Proceedings - 17th Convention of Electrical and Electronics
             Engineers in Israel, EEIS 1991},
   Pages = {174-177},
   Year = {1991},
   Month = {January},
   ISBN = {9780879426781},
   url = {http://dx.doi.org/10.1109/EEIS.1991.217670},
   Abstract = {A new approach for image coding based on bit-plane
             decomposition and binary morphological operations is
             presented. The image is first processed by an error-diffusion
             technique in order to reduce the number of bit-planes without
             significant quality degradation. The bit-planes of the
             resulting image are converted to Gray code and are
             represented by a modified morphological skeleton which uses
             an increasing size structuring element. Redundancy in this
             representation is reduced with an algorithm motivated by a
             Geometric Sampling Theorem which we present. These reduced
             modified morphological skeletons are coded with an entropy
             coding scheme which was particularly devised for efficient
             skeleton coding. A postprocessing operation, as well as the
             possibility of geometric progressive transmission, are also
             discussed.},
   Doi = {10.1109/EEIS.1991.217670},
   Key = {fds344655}
}

@article{fds264754,
   Author = {SAPIRO, G and MALAH, D},
   Title = {A GEOMETRIC SAMPLING THEOREM AND ITS APPLICATION IN
             MORPHOLOGICAL IMAGE-CODING},
   Journal = {DIGITAL SIGNAL PROCESSING - 91},
   Pages = {410-415},
   Year = {1991},
   ISBN = {0-444-88890-X},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:A1991BV45K00070&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Key = {fds264754}
}

@article{fds264867,
   Author = {Whitsel, BL and Petrucelli, LM and Sapiro, G and Ha,
             H},
   Title = {Fiber sorting in the fasciculus gracilis of squirrel
             monkeys.},
   Journal = {Experimental neurology},
   Volume = {29},
   Number = {2},
   Pages = {227-242},
   Year = {1970},
   Month = {November},
   ISSN = {0014-4886},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/4994110},
   Doi = {10.1016/0014-4886(70)90054-3},
   Key = {fds264867}
}

@article{fds264866,
   Author = {Whitsel, BL and Petrucelli, LM and Sapiro, G},
   Title = {Modality representation in the lumbar and cervical
             fasciculus gracilis of squirrel monkeys.},
   Journal = {Brain research},
   Volume = {15},
   Number = {1},
   Pages = {67-78},
   Year = {1969},
   Month = {September},
   ISSN = {0006-8993},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/4241233},
   Doi = {10.1016/0006-8993(69)90310-2},
   Key = {fds264866}
}

 
