@Article{ALH+2017,
author="Andersson, Richard
and Larsson, Linnea
and Holmqvist, Kenneth
and Stridh, Martin
and Nystr{\"o}m, Marcus",
title="One algorithm to rule them all? An evaluation and discussion of ten eye movement event-detection algorithms",
journal="Behavior Research Methods",
year="2017",
month="Apr",
day="01",
volume="49",
number="2",
pages="616--637",
abstract="Almost all eye-movement researchers use algorithms to parse raw data and detect distinct types of eye movement events, such as fixations, saccades, and pursuit, and then base their results on these. Surprisingly, these algorithms are rarely evaluated. We evaluated the classifications of ten eye-movement event detection algorithms, on data from an SMI HiSpeed 1250 system, and compared them to manual ratings of two human experts. The evaluation focused on fixations, saccades, and post-saccadic oscillations. The evaluation used both event duration parameters, and sample-by-sample comparisons to rank the algorithms. The resulting event durations varied substantially as a function of what algorithm was used. This evaluation differed from previous evaluations by considering a relatively large set of algorithms, multiple events, and data from both static and dynamic stimuli. The main conclusion is that current detectors of only fixations and saccades work reasonably well for static stimuli, but barely better than chance for dynamic stimuli. Differing results across evaluation methods make it difficult to select one winner for fixation detection. For saccade detection, however, the algorithm by Larsson, Nystr{\"o}m and Stridh (IEEE Transaction on Biomedical Engineering, 60(9):2484--2493,2013) outperforms all algorithms in data from both static and dynamic stimuli. The data also show how improperly selected algorithms applied to dynamic data misestimate fixation and saccade properties.",
issn="1554-3528",
doi="10.3758/s13428-016-0738-9"
}
@article{jaccard1901etude,
title={{\'E}tude comparative de la distribution florale dans une portion des Alpes et des Jura},
author={Jaccard, Paul},
journal={Bull Soc Vaudoise Sci Nat},
volume={37},
pages={547--579},
year={1901}
}
@article{amit2017temporal,
title={Temporal dynamics of saccades explained by a self-paced process},
author={Amit, Roy and Abeles, Dekel and Bar-Gad, Izhar and Yuval-Greenberg, Shlomit},
journal={Scientific reports},
volume={7},
number={1},
pages={886},
year={2017},
doi = {10.1038/s41598-017-00881-7},
publisher={Nature Publishing Group}
}
@article{bahill1975main,
title={The main sequence, a tool for studying human eye movements},
author={Bahill, A Terry and Clark, Michael R and Stark, Lawrence},
journal={Mathematical Biosciences},
volume={24},
number={3-4},
pages={191--204},
year={1975},
publisher={Elsevier},
doi={10.1016/0025-5564(75)90075-9}
}
@article{GOLTZ1997789,
title = "Vertical Eye Position Control in Darkness: Orbital Position and Body Orientation Interact to Modulate Drift Velocity",
journal = "Vision Research",
volume = "37",
number = "6",
pages = "789 - 798",
year = "1997",
issn = "0042-6989",
doi = "https://doi.org/10.1016/S0042-6989(96)00217-9",
author = "H.C Goltz and E.L Irving and M.J Steinbach and M EizenamnIZENMAN"
}
@article{cherici2012,
author = {Cherici, Claudia and Kuang, Xutao and Poletti, Martina and Rucci, Michele},
title = "{Precision of sustained fixation in trained and untrained observers}",
journal = {Journal of Vision},
volume = {12},
number = {6},
pages = {31--31},
year = {2012},
month = {06},
issn = {1534-7362},
doi = {10.1167/12.6.31},
}
@mastersthesis{Sch2017,
author = {Ulrike Schnaithman},
title = {Combining and testing filter and detection algorithms for post-experimental analysis of eye tracking data on dynamic stimuli},
type = {B.Sc. thesis},
school = {Otto von Guericke University, Magdeburg, Germany},
year = {2017}
}
@misc{michael_hanke_2019_2651042,
author = {Michael Hanke and
Asim H Dar and
Adina Wagner},
title = {psychoinformatics-de/remodnav: Submission time},
month = apr,
year = 2019,
doi = {10.5281/zenodo.2651042},
}
@article{wakefulness,
title = "Decoding Wakefulness Levels from Typical fMRI Resting-State Data Reveals Reliable Drifts between Wakefulness and Sleep",
journal = "Neuron",
volume = "82",
number = "3",
pages = "695 - 708",
year = "2014",
issn = "0896-6273",
doi = "https://doi.org/10.1016/j.neuron.2014.03.020",
url = "http://www.sciencedirect.com/science/article/pii/S0896627314002505",
author = "Enzo Tagliazucchi and Helmut Laufs"
}
@article{Hannula2010,
abstract = {Results of several investigations indicate that eye movements can reveal memory for elements of previous experience. These effects of memory on eye movement behavior can emerge very rapidly, changing the efficiency and even the nature of visual processing without appealing to verbal reports and without requiring conscious recollection. This aspect of eye-movement based memory investigations is particularly useful when eye movement methods are used with special populations (e.g., young children, elderly individuals, and patients with severe amnesia), and also permits use of comparable paradigms in animals and humans, helping to bridge different memory literatures and permitting cross-species generalizations. Unique characteristics of eye movement methods have produced findings that challenge long-held views about the nature of memory, its organization in the brain, and its failures in special populations. Recently, eye movement methods have been successfully combined with neuroimaging techniques such as fMRI, single-unit recording, and MEG, permitting more sophisticated investigations of memory. Ultimately, combined use of eye-tracking with neuropsychological and neuroimaging methods promises to provide a more comprehensive account of brain-behavior relationships and adheres to the ``converging evidence'' approach to cognitive neuroscience.},
author = {Hannula, Deborah E. and Althoff, Robert R and Warren, David E and Riggs, Lily and Cohen, Neal J and Ryan, Jennifer D},
doi = {10.3389/fnhum.2010.00166},
issn = {16625161},
journal = {Frontiers in Human Neuroscience},
keywords = {Amnesia,Eye Movements,Hippocampus,MEG,Memory,fMRI},
month = {oct},
pages = {166},
publisher = {Frontiers},
title = {{Worth a glance: using eye movements to investigate the cognitive neuroscience of memory}},
volume = {4},
year = {2010}
}
@article{Andersson2017,
abstract = {Almost all eye-movement researchers use algorithms to parse raw data and detect distinct types of eye movement events, such as fixations, saccades, and pursuit, and then base their results on these. Surprisingly, these algorithms are rarely evaluated. We evaluated the classifications of ten eye-movement event detection algorithms, on data from an SMI HiSpeed 1250 system, and compared them to manual ratings of two human experts. The evaluation focused on fixations, saccades, and post-saccadic oscillations. The evaluation used both event duration parameters, and sample-by-sample comparisons to rank the algorithms. The resulting event durations varied substantially as a function of what algorithm was used. This evaluation differed from previous evaluations by considering a relatively large set of algorithms, multiple events, and data from both static and dynamic stimuli. The main conclusion is that current detectors of only fixations and saccades work reasonably well for static stimuli, but barely better than chance for dynamic stimuli. Differing results across evaluation methods make it difficult to select one winner for fixation detection. For saccade detection, however, the algorithm by Larsson, Nystr{\"{o}}m and Stridh (IEEE Transaction on Biomedical Engineering, 60(9):2484-2493,2013) outperforms all algorithms in data from both static and dynamic stimuli. The data also show how improperly selected algorithms applied to dynamic data misestimate fixation and saccade properties.},
author = {Andersson, Richard and Larsson, Linnea and Holmqvist, Kenneth and Stridh, Martin and Nystr{\"{o}}m, Marcus},
doi = {10.3758/s13428-016-0738-9},
issn = {1554-3528},
journal = {Behavior Research Methods},
keywords = {Eye-tracking,Inter-rater reliability,Parsing},
month = {apr},
number = {2},
pages = {616--637},
pmid = {27193160},
title = {{One algorithm to rule them all? An evaluation and discussion of ten eye movement event-detection algorithms}},
volume = {49},
year = {2017}
}
@inproceedings{Holmqvist2012,
address = {New York, New York, USA},
author = {Holmqvist, Kenneth and Nystr{\"{o}}m, Marcus and Mulvey, Fiona},
booktitle = {Proceedings of the Symposium on Eye Tracking Research and Applications - ETRA '12},
doi = {10.1145/2168556.2168563},
isbn = {9781450312219},
keywords = {accuracy,data quality,eye movements,eye tracker,latency,precision},
pages = {45--52},
publisher = {ACM Press},
title = {{Eye tracker data quality}},
year = {2012}
}
@article{Toiviainen2014,
abstract = {We investigated neural correlates of musical feature processing with a decoding approach. To this end, we used a method that combines computational extraction of musical features with regularized multiple regression (LASSO). Optimal model parameters were determined by maximizing the decoding accuracy using a leave-one-out cross-validation scheme. The method was applied to functional magnetic resonance imaging (fMRI) data that were collected using a naturalistic paradigm, in which participants' brain responses were recorded while they were continuously listening to pieces of real music. The dependent variables comprised musical feature time series that were computationally extracted from the stimulus. We expected timbral features to obtain a higher prediction accuracy than rhythmic and tonal ones. Moreover, we expected the areas significantly contributing to the decoding models to be consistent with areas of significant activation observed in previous research using a naturalistic paradigm with fMRI. Of the six musical features considered, five could be significantly predicted for the majority of participants. The areas significantly contributing to the optimal decoding models agreed to a great extent with results obtained in previous studies. In particular, areas in the superior temporal gyrus, Heschl's gyrus, Rolandic operculum, and cerebellum contributed to the decoding of timbral features. For the decoding of the rhythmic feature, we found the bilateral superior temporal gyrus, right Heschl's gyrus, and hippocampus to contribute most. The tonal feature, however, could not be significantly predicted, suggesting a higher inter-participant variability in its neural processing. A subsequent classification experiment revealed that segments of the stimulus could be classified from the fMRI data with significant accuracy. The present findings provide compelling evidence for the involvement of the auditory cortex, the cerebellum and the hippocampus in the processing of musical features during continuous listening to music.},
author = {Toiviainen, Petri and Alluri, Vinoo and Brattico, Elvira and Wallentin, Mikkel and Vuust, Peter},
doi = {10.1016/J.NEUROIMAGE.2013.11.017},
issn = {1053-8119},
journal = {NeuroImage},
month = {mar},
pages = {170--180},
publisher = {Academic Press},
title = {{Capturing the musical brain with Lasso: Dynamic decoding of musical features from fMRI data}},
volume = {88},
year = {2014}
}
@article{Holsanova2006,
abstract = {The aim of this article is to compare general assumptions about newspaper reading with eye-tracking data from readers' actual interaction with a newspaper. First, we extract assumptions about the way people read newspapers from socio-semiotic research. Second, we apply these assumptions by analysing a newspaper spread; this is done without any previous knowledge of actual reading behaviour. Finally, we use eye-tracking to empirically examine so-called entry points and reading paths. Eye movement data on reading newspaper spreads are analysed in three different ways: the time sequence in which different areas attract attention is calculated in order to determine reading priorities; the amount of time spent on different areas is calculated in order to determine which areas have been read most; the depth of attention is calculated in order to determine how carefully those areas have been read. General assumptions extracted from the socio-semiotic framework are compared to the results of the actual behaviour of subjects reading the newspaper spread. The results show that the empirical data confirm some of the extracted assumptions. The reading paths of the five subjects participating in the eye-tracking tests suggest that there are three main categories of readers: editorial readers, overview readers and focused readers.},
author = {Holsanova, Jana and Rahm, Henrik and Holmqvist, Kenneth},
doi = {10.1177/1470357206061005},
issn = {1470-3572},
journal = {Visual Communication},
month = {feb},
number = {1},
pages = {65--93},
publisher = {Sage Publications},
title = {{Entry points and reading paths on newspaper spreads: comparing a semiotic analysis with eye-tracking measurements}},
volume = {5},
year = {2006}
}
@article{Gordon2006,
author = {Gordon, Peter C. and Hendrick, Randall and Johnson, Marcus and Lee, Yoonhyoung},
doi = {10.1037/0278-7393.32.6.1304},
issn = {1939-1285},
journal = {Journal of Experimental Psychology: Learning, Memory, and Cognition},
number = {6},
pages = {1304--1321},
title = {{Similarity-based interference during language comprehension: Evidence from eye tracking during reading.}},
volume = {32},
year = {2006}
}
@article{Tikka2012,
abstract = {We outline general theoretical and practical implications of what we promote as enactive cinema for the neuroscientific study of online socio-emotional interaction. In a real-time functional magnetic resonance imaging (rt-fMRI) setting, participants are immersed in cinematic experiences that simulate social situations. While viewing, their physiological reactions - including brain responses - are tracked, representing implicit and unconscious experiences of the on-going social situations. These reactions, in turn, are analysed in real-time and fed back to modify the cinematic sequences they are viewing while being scanned. Due to the engaging cinematic content, the proposed setting focuses on living-by in terms of shared psycho-physiological epiphenomena of experience rather than active coping in terms of goal-oriented motor actions. It constitutes a means to parametrically modify stimuli that depict social situations and their broader environmental contexts. As an alternative to studying the variation of brain responses as a function of a priori fixed stimuli, this method can be applied to survey the range of stimuli that evoke similar responses across participants at particular brain regions of interest.},
author = {Tikka, Pia and V{\"{a}}ljam{\"{a}}e, Aleksander and de Borst, Aline W. and Pugliese, Roberto and Ravaja, Niklas and Kaipainen, Mauri and Takala, Tapio},
doi = {10.3389/fnhum.2012.00298},
issn = {1662-5161},
journal = {Frontiers in Human Neuroscience},
keywords = {Brain-Computer-Interfaces,enactive cinema,generative storytelling,implicit interaction,real-time fMRI,social neuroscience,two-way feedback},
month = {nov},
pages = {298},
publisher = {Frontiers},
title = {{Enactive cinema paves way for understanding complex real-time social interaction in neuroimaging experiments}},
volume = {6},
year = {2012}
}
@phdthesis{Larsson2016,
author = {Larsson, Linn{\'{e}}a},
title = {{Event Detection in Eye-Tracking Data for Use in Applications with Dynamic Stimuli}},
school = {Lund University},
url = {http://portal.research.lu.se/portal/files/6192499/8600514.pdf},
year = {2016}
}
@article{Nystrom2010AnData,
title = {{An adaptive algorithm for fixation, saccade, and glissade detection in eyetracking data}},
year = {2010},
journal = {Behavior Research Methods},
author = {Nystr{\"{o}}m, Marcus and Holmqvist, Kenneth},
number = {1},
month = {2},
pages = {188--204},
volume = {42},
publisher = {Springer-Verlag},
doi = {10.3758/BRM.42.1.188},
issn = {1554-351X}
}
@Article{Stampe1993,
author="Stampe, Dave M.",
title="Heuristic filtering and reliable calibration methods for video-based pupil-tracking systems",
journal="Behavior Research Methods, Instruments, {\&} Computers",
year="1993",
month="Jun",
day="01",
volume="25",
number="2",
pages="137--142",
abstract="Methods for enhancing the accuracy of fixation and saccade detection and the reliability of calibration in video gaze-tracking systems are discussed. The unique aspects of the present approach include effective low-delay noise reduction prior to the detection of fixation changes, monitoring of gaze position in real time by the operator, identification of saccades as small as 0.5{\textdegree} while eliminating false fixations, and a quick, high-precision, semiautomated calibration procedure.",
issn="1532-5970",
doi="10.3758/BF03204486"
}
@article{dorr2010variability,
title={Variability of eye movements when viewing dynamic natural scenes},
author={Dorr, Michael and Martinetz, Thomas and Gegenfurtner, Karl R and Barth, Erhardt},
journal={Journal of Vision},
volume={10},
number={10},
pages={28--28},
year={2010},
publisher={The Association for Research in Vision and Ophthalmology},
doi={10.1167/10.10.28}
}
@Article{Duchowski2002,
author="Duchowski, Andrew T.",
title="A breadth-first survey of eye-tracking applications",
journal="Behavior Research Methods, Instruments, {\&} Computers",
year="2002",
month="Nov",
day="01",
volume="34",
number="4",
pages="455--470",
abstract="Eye-tracking applications are surveyed in a breadth-first manner, reporting on work from the following domains: neuroscience, psychology, industrial engineering and human factors, marketing/advertising, and computer science. Following a review of traditionally diagnostic uses, emphasis is placed on interactive applications, differentiating between selective and gaze-contingent approaches.",
issn="1532-5970",
doi="10.3758/BF03195475"
}
@book{holmqvist2011eye,
title={Eye tracking: A comprehensive guide to methods and measures},
author={Holmqvist, Kenneth and Nystr{\"o}m, Marcus and Andersson, Richard and Dewhurst, Richard and Jarodzka, Halszka and Van de Weijer, Joost},
year={2011},
publisher={OUP Oxford}
}
@inproceedings{munn2008fixation,
title={Fixation-identification in dynamic scenes: Comparing an automated algorithm to manual coding},
author={Munn, Susan M and Stefano, Leanne and Pelz, Jeff B},
booktitle={Proceedings of the 5th symposium on Applied perception in graphics and visualization},
pages={33--42},
year={2008},
doi={10.1145/1394281.1394287},
organization={ACM}
}
@article{LARSSON2015145,
title = "Detection of fixations and smooth pursuit movements in high-speed eye-tracking data",
journal = "Biomedical Signal Processing and Control",
volume = "18",
pages = "145 - 152",
year = "2015",
issn = "1746-8094",
doi = "https://doi.org/10.1016/j.bspc.2014.12.008",
author = "Linnéa Larsson and Marcus Nystr{\"{o}}m and Richard Andersson and Martin Stridh",
keywords = "Signal processing, Eye-tracking, Smooth pursuit",
abstract = "A novel algorithm for the detection of fixations and smooth pursuit movements in high-speed eye-tracking data is proposed, which uses a three-stage procedure to divide the intersaccadic intervals into a sequence of fixation and smooth pursuit events. The first stage performs a preliminary segmentation while the latter two stages evaluate the characteristics of each such segment and reorganize the preliminary segments into fixations and smooth pursuit events. Five different performance measures are calculated to investigate different aspects of the algorithm's behavior. The algorithm is compared to the current state-of-the-art (I-VDT and the algorithm in [11]), as well as to annotations by two experts. The proposed algorithm performs considerably better (average Cohen's kappa 0.42) than the I-VDT algorithm (average Cohen's kappa 0.20) and the algorithm in [11] (average Cohen's kappa 0.16), when compared to the experts’ annotations."
}
@Article{Komogortsev2013,
author="Komogortsev, Oleg V.
and Karpov, Alex",
title="Automated classification and scoring of smooth pursuit eye movements in the presence of fixations and saccades",
journal="Behavior Research Methods",
year="2013",
month="Mar",
day="01",
volume="45",
number="1",
pages="203--215",
abstract="Ternary eye movement classification, which separates fixations, saccades, and smooth pursuit from the raw eye positional data, is extremely challenging. This article develops new and modifies existing eye-tracking algorithms for the purpose of conducting meaningful ternary classification. To this end, a set of qualitative and quantitative behavior scores is introduced to facilitate the assessment of classification performance and to provide means for automated threshold selection. Experimental evaluation of the proposed methods is conducted using eye movement records obtained from 11 subjects at 1000 Hz in response to a step-ramp stimulus eliciting fixations, saccades, and smooth pursuits. Results indicate that a simple hybrid method that incorporates velocity and dispersion thresholding allows producing robust classification performance. It is concluded that behavior scores are able to aid automated threshold selection for the algorithms capable of successful classification.",
issn="1554-3528",
doi="10.3758/s13428-012-0234-9"
}
@Article{Zemblys2018,
author="Zemblys, Raimondas
and Niehorster, Diederick C.
and Holmqvist, Kenneth",
title="gazeNet: End-to-end eye-movement event detection with deep neural networks",
journal="Behavior Research Methods",
year="2018",
month="Oct",
day="17",
abstract="Existing event detection algorithms for eye-movement data almost exclusively rely on thresholding one or more hand-crafted signal features, each computed from the stream of raw gaze data. Moreover, this thresholding is largely left for the end user. Here we present and develop gazeNet, a new framework for creating event detectors that do not require hand-crafted signal features or signal thresholding. It employs an end-to-end deep learning approach, which takes raw eye-tracking data as input and classifies it into fixations, saccades and post-saccadic oscillations. Our method thereby challenges an established tacit assumption that hand-crafted features are necessary in the design of event detection algorithms. The downside of the deep learning approach is that a large amount of training data is required. We therefore first develop a method to augment hand-coded data, so that we can strongly enlarge the data set used for training, minimizing the time spent on manual coding. Using this extended hand-coded data, we train a neural network that produces eye-movement event classification from raw eye-movement data without requiring any predefined feature extraction or post-processing steps. The resulting classification performance is at the level of expert human coders. Moreover, an evaluation of gazeNet on two other datasets showed that gazeNet generalized to data from different eye trackers and consistently outperformed several other event detection algorithms that we tested.",
issn="1554-3528",
doi="10.3758/s13428-018-1133-5"
}
@article{real_world,
author = {Matusz, Pawel J. and Dikker, Suzanne and Huth, Alexander G. and Perrodin, Catherine},
title = {Are We Ready for Real-world Neuroscience?},
journal = {Journal of Cognitive Neuroscience},
volume = {31},
number = {3},
pages = {327--338},
year = {2019},
doi = {10.1162/jocn_e_01276},
note ={PMID: 29916793},
abstract = { Real-world environments are typically dynamic, complex, and multisensory in nature and require the support of top–down attention and memory mechanisms for us to be able to drive a car, make a shopping list, or pour a cup of coffee. Fundamental principles of perception and functional brain organization have been established by research utilizing well-controlled but simplified paradigms with basic stimuli. The last 30 years ushered a revolution in computational power, brain mapping, and signal processing techniques. Drawing on those theoretical and methodological advances, over the years, research has departed more and more from traditional, rigorous, and well-understood paradigms to directly investigate cognitive functions and their underlying brain mechanisms in real-world environments. These investigations typically address the role of one or, more recently, multiple attributes of real-world environments. Fundamental assumptions about perception, attention, or brain functional organization have been challenged—by studies adapting the traditional paradigms to emulate, for example, the multisensory nature or varying relevance of stimulation or dynamically changing task demands. Here, we present the state of the field within the emerging heterogeneous domain of real-world neuroscience. To be precise, the aim of this Special Focus is to bring together a variety of the emerging “real-world neuroscientific” approaches. These approaches differ in their principal aims, assumptions, or even definitions of “real-world neuroscience” research. Here, we showcase the commonalities and distinctive features of the different “real-world neuroscience” approaches. To do so, four early-career researchers and the speakers of the Cognitive Neuroscience Society 2017 Meeting symposium under the same title answer questions pertaining to the added value of such approaches in bringing us closer to accurate models of functional brain organization and cognitive functions.}
}
@Article{Hooge2018,
author="Hooge, Ignace T. C.
and Niehorster, Diederick C.
and Nystr{\"o}m, Marcus
and Andersson, Richard
and Hessels, Roy S.",
title="Is human classification by experienced untrained observers a gold standard in fixation detection?",
journal="Behavior Research Methods",
year="2018",
month="Oct",
day="01",
volume="50",
number="5",
pages="1864--1881",
abstract="Manual classification is still a common method to evaluate event detection algorithms. The procedure is often as follows: Two or three human coders and the algorithm classify a significant quantity of data. In the gold standard approach, deviations from the human classifications are considered to be due to mistakes of the algorithm. However, little is known about human classification in eye tracking. To what extent do the classifications from a larger group of human coders agree? Twelve experienced but untrained human coders classified fixations in 6 min of adult and infant eye-tracking data. When using the sample-based Cohen's kappa, the classifications of the humans agreed near perfectly. However, we found substantial differences between the classifications when we examined fixation duration and number of fixations. We hypothesized that the human coders applied different (implicit) thresholds and selection rules. Indeed, when spatially close fixations were merged, most of the classification differences disappeared. On the basis of the nature of these intercoder differences, we concluded that fixation classification by experienced untrained human coders is not a gold standard. To bridge the gap between agreement measures (e.g., Cohen's kappa) and eye movement parameters (fixation duration, number of fixations), we suggest the use of the event-based F1 score and two new measures: the relative timing offset (RTO) and the relative timing deviation (RTD).",
issn="1554-3528",
doi="10.3758/s13428-017-0955-x"
}
@ARTICLE{5523936,
author={O. V. {Komogortsev} and D. V. {Gobert} and S. {Jayarathna} and D. H. {Koh} and S. M. {Gowda}},
journal={IEEE Transactions on Biomedical Engineering},
title={Standardization of Automated Analyses of Oculomotor Fixation and Saccadic Behaviors},
year={2010},
volume={57},
number={11},
pages={2635--2645},
keywords={biomechanics;biomedical optical imaging;eye;image classification;medical image processing;oculomotor fixation;saccadic behaviors;standardization;eye movement classification algorithms;stimulus-evoked task;threshold-value selection;Standardization;Classification algorithms;Computer science;Logic;Humans;Visual system;Psychology;Permission;Brain injuries;Alzheimer's disease;Analysis;baseline;eye-movement classification;oculomotor behavior;Adolescent;Adult;Algorithms;Female;Fixation, Ocular;Humans;Male;Saccades;Young Adult},
doi={10.1109/TBME.2010.2057429},
ISSN={0018-9294},
month={Nov}
}
@article{gorgolewski2016brain,
title={The brain imaging data structure, a format for organizing and describing outputs of neuroimaging experiments},
author={Gorgolewski, Krzysztof J and Auer, Tibor and Calhoun, Vince D and Craddock, R Cameron and Das, Samir and Duff, Eugene P and Flandin, Guillaume and Ghosh, Satrajit S and Glatard, Tristan and Halchenko, Yaroslav O and others},
journal={Scientific Data},
volume={3},
pages={160044},
year={2016},
doi={10.1038/sdata.2016.44},
publisher={Nature Publishing Group}
}
@article{carl1987pursuits,
author = {Carl, J. R. and Gellman, R. S.},
title = {Human smooth pursuit: stimulus-dependent responses},
journal = {Journal of Neurophysiology},
volume = {57},
number = {5},
pages = {1446--1463},
year = {1987},
doi = {10.1152/jn.1987.57.5.1446},
note ={PMID: 3585475},
abstract = { We studied pursuit eye movements in seven normal human subjects with the scleral search-coil technique. The initial eye movements in response to unpredictable changes in target motion were analyzed to determine the effect of target velocity and position on the latency and acceleration of the response. By restricting our analysis to the presaccadic portion of the response we were able to eliminate any saccadic interactions, and the randomized stimulus presentation minimized anticipatory responses. This approach has allowed us to characterize a part of the smooth-pursuit system that is dependent primarily on retinal image properties. The latency of the smooth-pursuit response was very consistent, with a mean of 100 +/- 5 ms to targets moving 5 degrees/s or faster. The responses were the same whether the velocity step was presented when the target was initially stationary or after tracking was established. The latency did increase for lower velocity targets; this increase was well described by a latency model requiring a minimum target movement of 0.028 degrees, in addition to a fixed processing time of 98 ms. The presaccadic accelerations were fairly low, and increased with target velocity until an acceleration of about 50 degrees/s2 was reached for target velocities of 10 degrees/s. Higher velocities produced only a slight increase in eye acceleration. When the target motion was adjusted so that the retinal image slip occurred at increasing distances from the fovea, the accelerations declined until no presaccadic response was measurable when the image slip started 15 degrees from the fovea. The smooth-pursuit response to a step of target position was a brief acceleration; this response occurred even when an oppositely directed velocity stimulus was present. The latency of the pursuit response to such a step was also approximately 100 ms. This result seems consistent with the idea that sensory pathways act as a low-pass spatiotemporal filter of the retinal input, effectively converting position steps into briefly moving stimuli. There was a large asymmetry in the responses to position steps: the accelerations were much greater when the position step of the target was away from the direction of tracking, compared with steps in the direction of tracking. The asymmetry may be due to the addition of a fixed slowing of the eyes whenever the target image disappears from the foveal region. When saccades were delayed by step-ramp stimuli, eye accelerations increased markedly approximately 200 ms after stimulus onset.(ABSTRACT TRUNCATED AT 400 WORDS)}
}
@Article{Startsev2018,
author="Startsev, Mikhail
and Agtzidis, Ioannis
and Dorr, Michael",
title="1D CNN with BLSTM for automated classification of fixations, saccades, and smooth pursuits",
journal="Behavior Research Methods",
year="2018",
month="Nov",
day="08",
abstract="Deep learning approaches have achieved breakthrough performance in various domains. However, the segmentation of raw eye-movement data into discrete events is still done predominantly either by hand or by algorithms that use hand-picked parameters and thresholds. We propose and make publicly available a small 1D-CNN in conjunction with a bidirectional long short-term memory network that classifies gaze samples as fixations, saccades, smooth pursuit, or noise, simultaneously assigning labels in windows of up to 1 s. In addition to unprocessed gaze coordinates, our approach uses different combinations of the speed of gaze, its direction, and acceleration, all computed at different temporal scales, as input features. Its performance was evaluated on a large-scale hand-labeled ground truth data set (GazeCom) and against 12 reference algorithms. Furthermore, we introduced a novel pipeline and metric for event detection in eye-tracking recordings, which enforce stricter criteria on the algorithmically produced events in order to consider them as potentially correct detections. Results show that our deep approach outperforms all others, including the state-of-the-art multi-observer smooth pursuit detector. We additionally test our best model on an independent set of recordings, where our approach stays highly competitive compared to literature methods.",
issn="1554-3528",
doi="10.3758/s13428-018-1144-2"
}
@article{Schutz2011,
author = {Sch{\"u}tz, A. C. and Braun, D. I. and Gegenfurtner, K. R.},
doi = {10.1167/11.5.9},
issn = {1534-7362},
journal = {Journal of Vision},
keywords = {eye,eye movement,motion,motion perception,noise,object recognition,perception,pursuit, smooth,saccades},
month = {sep},
number = {5},
pages = {9--9},
publisher = {The Association for Research in Vision and Ophthalmology},
title = {{Eye movements and perception: A selective review}},
volume = {11},
year = {2011}
}
@article{cohen1960coefficient,
title={A coefficient of agreement for nominal scales},
author={Cohen, Jacob},
journal={Educational and Psychological Measurement},
volume={20},
number={1},
pages={37--46},
year={1960},
publisher={Sage Publications}
}
@article{hessels2018eye,
title={Is the eye-movement field confused about fixations and saccades? A survey among 124 researchers},
author={Hessels, Roy S and Niehorster, Diederick C and Nystr{\"o}m, Marcus and Andersson, Richard and Hooge, Ignace TC},
journal={Royal Society Open Science},
volume={5},
number={8},
pages={180502},
year={2018},
publisher={The Royal Society}
}
@inproceedings{holmqvist2012eye,
title={Eye tracker data quality: what it is and how to measure it},
author={Holmqvist, Kenneth and Nystr{\"o}m, Marcus and Mulvey, Fiona},
booktitle={Proceedings of the symposium on eye tracking research and applications},
pages={45--52},
year={2012},
organization={ACM}
}
@article{maguire2012studying,
title={Studying the freely-behaving brain with fMRI},
author={Maguire, Eleanor A},
journal={NeuroImage},
volume={62},
number={2},
pages={1170--1176},
year={2012},
publisher={Elsevier}
}
@article{choe2016pupil,
title={Pupil size dynamics during fixation impact the accuracy and precision of video-based gaze estimation},
author={Choe, Kyoung Whan and Blake, Randolph and Lee, Sang-Hun},
journal={Vision Research},
volume={118},
pages={48--59},
year={2016},
publisher={Elsevier}
}
@inproceedings{Mathe2012,
author = {Mathe, Stefan and Sminchisescu, Cristian},
title = {Dynamic Eye Movement Datasets and Learnt Saliency Models for Visual Action Recognition},
year = {2012},
isbn = {9783642337086},
publisher = {Springer-Verlag},
address = {Berlin, Heidelberg},
booktitle = {Proceedings, Part II, of the 12th European Conference on Computer Vision --- ECCV 2012 - Volume 7573},
pages = {842--856},
numpages = {15}
}
@article{Friedman2018,
author = {Friedman, Lee and Rigas, Ioannis and Abdulin, Evgeny and Komogortsev, Oleg V.},
doi = {10.3758/s13428-018-1050-7},
issn = {1554-3528},
journal = {Behavior Research Methods},
month = {aug},
number = {4},
pages = {1374--1397},
publisher = {Springer US},
title = {{A novel evaluation of two related and two independent algorithms for eye movement classification during reading}},
volume = {50},
year = {2018}
}
@article{Hanke2016,
abstract = {A \emph{studyforrest} extension, simultaneous fMRI and eye gaze recordings during prolonged natural stimulation},
author = {Hanke, Michael and Adelh{\"{o}}fer, Nico and Kottke, Daniel and Iacovella, Vittorio and Sengupta, Ayan and Kaule, Falko R. and Nigbur, Roland and Waite, Alexander Q. and Baumgartner, Florian and Stadler, J{\"{o}}rg},
doi = {10.1038/sdata.2016.92},
issn = {2052-4463},
journal = {Scientific Data},
keywords = {Attention,Cortex,Language,Neural encoding,Visual system},
month = {oct},
pages = {160092},
publisher = {Nature Publishing Group},
title = {{A studyforrest extension, simultaneous fMRI and eye gaze recordings during prolonged natural stimulation}},
volume = {3},
year = {2016}
}
@article{HantaoLiu2011,
abstract = {Since the human visual system (HVS) is the ultimate assessor of image quality, current research on the design of objective image quality metrics tends to include an important feature of the HVS, namely, visual attention. Different metrics for image quality prediction have been extended with a computational model of visual attention, but the resulting gain in reliability of the metrics so far was variable. To better understand the basic added value of including visual attention in the design of objective metrics, we used measured data of visual attention. To this end, we performed two eye-tracking experiments: one with a free-looking task and one with a quality assessment task. In the first experiment, 20 observers looked freely to 29 unimpaired original images, yielding us so-called natural scene saliency (NSS). In the second experiment, 20 different observers assessed the quality of distorted versions of the original images. The resulting saliency maps showed some differences with the NSS, and therefore, we applied both types of saliency to four different objective metrics predicting the quality of JPEG compressed images. For both types of saliency the performance gain of the metrics improved, but to a larger extent when adding the NSS. As a consequence, we further integrated NSS in several state-of-the-art quality metrics, including three full-reference metrics and two no-reference metrics, and evaluated their prediction performance for a larger set of distortions. By doing so, we evaluated whether and to what extent the addition of NSS is beneficial to objective quality prediction in general terms. In addition, we address some practical issues in the design of an attention-based metric. The eye-tracking data are made available to the research community.},
author = {Liu, Hantao and Heynderickx, Ingrid},
doi = {10.1109/TCSVT.2011.2133770},
isbn = {1051-8215},
issn = {10518215},
journal = {IEEE Transactions on Circuits and Systems for Video Technology},
keywords = {Eye tracking,image quality assessment,objective metric,saliency map,visual attention},
month = {jul},
number = {7},
pages = {971--982},
title = {{Visual attention in objective image quality assessment: Based on eye-tracking data}},
volume = {21},
year = {2011}
}
@article{Larsson2013,
abstract = {A novel algorithm for detection of saccades and postsaccadic oscillations in the presence of smooth pursuit movements is proposed. The method combines saccade detection in the acceleration domain with specialized on- and offset criteria for saccades and postsaccadic oscillations. The performance of the algorithm is evaluated by comparing the detection results to those of an existing velocity-based adaptive algorithm and a manually annotated database. The results show that there is a good agreement between the events detected by the proposed algorithm and those in the annotated database with Cohen's kappa around 0.8 for both a development and a test database. In conclusion, the proposed algorithm accurately detects saccades and postsaccadic oscillations as well as intervals of disturbances.},
author = {Larsson, Linnea and Nystr{\"{o}}m, Marcus and Stridh, Martin},
doi = {10.1109/TBME.2013.2258918},
isbn = {1558-2531 (Electronic) 0018-9294 (Linking)},
issn = {15582531},
journal = {IEEE Transactions on Biomedical Engineering},
keywords = {Eye-tracking,signal processing,smooth pursuit},
month = {sep},
number = {9},
pages = {2484--2493},
pmid = {23625350},
title = {{Detection of saccades and postsaccadic oscillations in the presence of smooth pursuit}},
volume = {60},
year = {2013}
}
@article{Hanke2014,
abstract = {A high-resolution 7-Tesla fMRI dataset from complex natural stimulation with an audio movie},
author = {Hanke, Michael and Baumgartner, Florian J. and Ibe, Pierre and Kaule, Falko R. and Pollmann, Stefan and Speck, Oliver and Zinke, Wolf and Stadler, J{\"{o}}rg},
doi = {10.1038/sdata.2014.3},
issn = {2052-4463},
journal = {Scientific Data},
keywords = {Auditory system,Functional magnetic resonance imaging,Language,Perception},
month = {may},
pages = {140003},
publisher = {Nature Publishing Group},
title = {{A high-resolution 7-Tesla fMRI dataset from complex natural stimulation with an audio movie}},
volume = {1},
year = {2014}
}
@article{Harris2014,
abstract = {Face-selective regions in the amygdala and posterior superior temporal sulcus (pSTS) are strongly implicated in the processing of transient facial signals, such as expression. Here, we measured neural responses in participants while they viewed dynamic changes in facial expression. Our aim was to explore how facial expression is represented in different face-selective regions. Short movies were generated by morphing between faces posing a neutral expression and a prototypical expression of a basic emotion (either anger, disgust, fear, happiness or sadness). These dynamic stimuli were presented in block design in the following four stimulus conditions: (1) same-expression change, same-identity, (2) same-expression change, different-identity, (3) different-expression change, same-identity, and (4) different-expression change, different-identity. So, within a same-expression change condition the movies would show the same change in expression whereas in the different-expression change conditions each movie would have a different change in expression. Facial identity remained constant during each movie but in the different identity conditions the facial identity varied between each movie in a block. The amygdala, but not the posterior STS, demonstrated a greater response to blocks in which each movie morphed from neutral to a different emotion category compared to blocks in which each movie morphed to the same emotion category. Neural adaptation in the amygdala was not affected by changes in facial identity. These results are consistent with a role of the amygdala in category-based representation of facial expressions of emotion.},
author = {Harris, Richard J and Young, Andrew W and Andrews, Timothy J},
doi = {10.1016/j.neuropsychologia.2014.01.005},
issn = {1873-3514},
journal = {Neuropsychologia},
keywords = {Emotion,Expression,Face,fMRI},
month = {apr},
number = {100},
pages = {47--52},
pmid = {24447769},
publisher = {Elsevier},
title = {{Dynamic stimuli demonstrate a categorical representation of facial expression in the amygdala.}},
volume = {56},
year = {2014}
}
@Misc{JOP+2001,
author = {Eric Jones and Travis Oliphant and Pearu Peterson and others},
title = {{SciPy}: Open source scientific tools for {Python}},
year = {2001--},
url = "http://www.scipy.org"
}
@book{oliphant2006guide,
title={A guide to NumPy},
author={Oliphant, Travis E},
volume={1},
year={2006},
publisher={Trelgol Publishing USA}
}
@article{hunter2007matplotlib,
title={Matplotlib: A 2D graphics environment},
author={Hunter, John D},
journal={Computing in Science \& Engineering},
volume={9},
number={3},
pages={90--95},
year={2007},
publisher={IEEE},
doi={10.1109/MCSE.2007.55}
}
@inproceedings{mckinney2010data,
title={Data structures for statistical computing in python},
author={McKinney, Wes and others},
booktitle={Proceedings of the 9th Python in Science Conference},
volume={445},
pages={51--56},
year={2010},
organization={Austin, TX}
}
@inproceedings{seabold2010statsmodels,
title={Statsmodels: Econometric and statistical modeling with python},
author={Seabold, Skipper and Perktold, Josef},
booktitle={9th Python in Science Conference},
year={2010},
}
@Misc{HH+2013,
author = {Yaroslav O. Halchenko and Michael Hanke and others},
title = {{DataLad}: perpetual decentralized management of digital objects},
year = {2013--},
url = "http://datalad.org",
doi={10.5281/zenodo.1470735}
}
@article{hessels2017noise,
title={Noise-robust fixation detection in eye movement data: Identification by two-means clustering (I2MC)},
author={Hessels, Roy S and Niehorster, Diederick C and Kemner, Chantal and Hooge, Ignace TC},
journal={Behavior Research Methods},
volume={49},
number={5},
pages={1802--1823},
year={2017},
publisher={Springer}
}
@article{HOOGE20166,
title = "The pupil is faster than the corneal reflection (CR): Are video based pupil-CR eye trackers suitable for studying detailed dynamics of eye movements?",
journal = "Vision Research",
volume = "128",
pages = "6 - 18",
year = "2016",
issn = "0042-6989",
doi = "https://doi.org/10.1016/j.visres.2016.09.002",
url = "http://www.sciencedirect.com/science/article/pii/S0042698916301031",
author = "Ignace Hooge and Kenneth Holmqvist and Marcus Nyström",
keywords = "Saccades, Pupil, Corneal reflection",
abstract = "Most modern video eye trackers use the p-CR (pupil minus CR) technique to deal with small relative movements between the eye tracker camera and the eye. We question whether the p-CR technique is appropriate to investigate saccade dynamics. In two experiments we investigated the dynamics of pupil, CR and gaze signals obtained from a standard SMI Hi-Speed eye tracker. We found many differences between the pupil and the CR signals. Differences concern timing of the saccade onset, saccade peak velocity and post-saccadic oscillation (PSO). We also obtained that pupil peak velocities were higher than CR peak velocities. Saccades in the eye trackers’ gaze signal (that is constructed from p-CR) appear to be excessive versions of saccades in the pupil signal. We conclude that the pupil-CR technique is not suitable for studying detailed dynamics of eye movements."
}
@article{dalveren2019evaluation,
title={Evaluation of Ten Open-Source Eye-Movement Classification Algorithms in Simulated Surgical Scenarios},
author={Dalveren, Gonca Gokce Menekse and Cagiltay, Nergiz Ercil},
journal={IEEE Access},
volume={7},
pages={161794--161804},
year={2019},
publisher={IEEE}
}
@article{van2018gazepath,
title={Gazepath: An eye-tracking analysis tool that accounts for individual differences and data quality},
author={van Renswoude, Daan R and Raijmakers, Maartje EJ and Koornneef, Arnout and Johnson, Scott P and Hunnius, Sabine and Visser, Ingmar},
journal={Behavior Research Methods},
volume={50},
number={2},
pages={834--852},
year={2018},
publisher={Springer}
}