@@ -0,0 +1,232 @@
+@InProceedings{lopez2017gradient,
+ author = {Lopez-Paz, David and Ranzato, Marc'Aurelio},
+ title = {Gradient episodic memory for continual learning},
+ year = {2017},
+ pages = {6467--6476},
+ volume = {30},
+ comment = {They introduce metrics for evaluating backward and forward transfer in task-incremental learning and assume the task label is available at inference.
+No assumptions on the number of tasks are made.
+Uses a memory buffer to constrain updates when training on new tasks.
+Constraint: the gradient direction of each past task (estimated from the memory) must have a positive dot product with the gradient of the current batch (see the sketch after this entry).
+Disadvantage: slow constrained optimization & TASK INCREMENTAL},
+ file = {:life_long_learning_papers/lopez2017gradient - Gradient Episodic Memory for Continual Learning.pdf:PDF},
+ booktitle = {Advances in neural information processing systems},
+ url = {https://proceedings.neurips.cc/paper/2017/file/f87522788a2be2d171666752f97ddebb-Paper.pdf},
+}
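+
+@Comment{Sketch (my note, not code from the paper): the GEM constraint described in
+the entry above, in minimal PyTorch-style form. Gradients are assumed to be flattened
+tensors and all names are placeholders.
+
+import torch
+
+def violates_gem_constraints(grad_batch, grads_memory):
+    # grads_memory: one gradient vector per past task, estimated from the episodic memory.
+    # GEM demands a non-negative dot product between the proposed update and every
+    # past-task gradient; if violated, the paper projects the gradient via a small QP
+    # (projection omitted here).
+    return any(torch.dot(grad_batch, g_k) < 0.0 for g_k in grads_memory)
+}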
+
+@InProceedings{Prabhu2020GDumbAS,
+ author = {Ameya Prabhu and Philip H. S. Torr and P. Dokania},
+ booktitle = {ECCV},
+ title = {GDumb: A Simple Approach that Questions Our Progress in Continual Learning},
+ year = {2020},
+ comment = {\begin{itemize}
+\item GDumb = Greedy Sampler and Dumb Learner (class-balanced storing, retrained from scratch on the buffer); see the sketch after this entry.
+\item Simplifying assumptions in CL:
+\begin{enumerate}
+ \item Disjoint task formulation: at any particular point in time, the data stream provides samples belonging to exactly one task.
+ \item Task-incremental (multi-head): in addition to the disjoint-task assumption, the task information (or id) is also provided by an oracle during training and inference. In class-incremental learning no such task information is given.
+ \item Online CL: the learner may use each sample only once to update its parameters. In offline CL there is unrestricted access to the entire dataset of the current task.
+\end{enumerate}
+\item Online CL is preferable when the data stream delivers samples at a high rate.
+\item They find that GDumb outperforms most methods by a large margin.
+\end{itemize}},
+ file = {:life_long_learning_papers/Prabhu2020GDumbAS - GDumb_ a Simple Approach That Questions Our Progress in Continual Learning.pdf:PDF},
+ url = {https://link.springer.com/content/pdf/10.1007/978-3-030-58536-5_31.pdf},
+}
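+
+@Comment{Sketch (my note, not the authors' code): the greedy class-balancing sampler
+behind GDumb, as I read the entry above; the "dumb learner" is then retrained from
+scratch on this buffer. Names are placeholders and eviction within the largest class
+is simplified to "first one found".
+
+from collections import Counter
+
+def gdumb_greedy_store(buffer, sample, label, capacity):
+    # Always store while the buffer has room; once full, accept the new sample only if
+    # its class is under-represented, evicting a sample from the currently largest class.
+    counts = Counter(y for _, y in buffer)
+    if len(buffer) < capacity:
+        buffer.append((sample, label))
+    elif counts[label] < max(counts.values()):
+        largest = counts.most_common(1)[0][0]
+        victim = next(i for i, (_, y) in enumerate(buffer) if y == largest)
+        buffer[victim] = (sample, label)
+    return buffer
+}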
+
+@Article{Aljundi2019OnlineCL,
+ author = {Rahaf Aljundi and Lucas Caccia and Eugene Belilovsky and Massimo Caccia and Min Lin and Laurent Charlin and T. Tuytelaars},
+ journal = {ArXiv},
+ title = {Online Continual Learning with Maximally Interfered Retrieval},
+ year = {2019},
+ volume = {abs/1908.04742},
+ comment = {\begin{itemize}
+\item CI-CL, online, disjoint, memory-based approach.
+\item Criterion for controlled sampling for replay: retrieve the samples whose predictions would be most negatively impacted by the foreseen parameter update. Their research question: which samples should be replayed from the previous history when new samples are received?
+\item Main idea: experience replay where the samples used to augment the incoming batch from the stream are those whose loss increases most when updating on the new data (computed for a subset of the previous data); see the sketch after this entry.
+\item Also applicable to generative replay approaches.
+\item In the case of a relatively small total number of classes/tasks (Split MNIST), their approach (ER+MIR) is significantly better (87.6\%) than ER with random sampling (82.1\%). In other scenarios, their approach outperforms by a smaller margin.
+\item (At least in the ER scenario, I don't really understand why they restrict themselves to a disjoint setting.)
+\end{itemize}},
+ file = {:life_long_learning_papers/Aljundi2019OnlineCL - Online Continual Learning with Maximally Interfered Retrieval.pdf:PDF},
+ url = {https://arxiv.org/pdf/1908.04742.pdf},
+}
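+
+@Comment{Sketch (my note, not the authors' code): the MIR retrieval criterion from the
+entry above, assuming PyTorch, a plain SGD step, and cross-entropy loss; all names are
+placeholders.
+
+import copy
+import torch
+import torch.nn.functional as F
+
+def mir_retrieve(model, x_new, y_new, x_mem, y_mem, lr, k):
+    # Take a virtual SGD step on the incoming batch, then replay the k memory samples
+    # whose loss increases the most under that update (maximal interference).
+    virtual = copy.deepcopy(model)
+    loss = F.cross_entropy(virtual(x_new), y_new)
+    grads = torch.autograd.grad(loss, list(virtual.parameters()))
+    with torch.no_grad():
+        for p, g in zip(virtual.parameters(), grads):
+            p.sub_(lr * g)                                 # virtual parameter update
+        before = F.cross_entropy(model(x_mem), y_mem, reduction="none")
+        after = F.cross_entropy(virtual(x_mem), y_mem, reduction="none")
+        top = torch.topk(after - before, k).indices        # largest loss increase
+    return x_mem[top], y_mem[top]
+}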
+
+@InProceedings{Aljundi2019GradientBS,
+ author = {Rahaf Aljundi and Min Lin and Baptiste Goujaud and Yoshua Bengio},
+ booktitle = {NeurIPS},
+ title = {Gradient based sample selection for online continual learning},
+ year = {2019},
+ file = {:life_long_learning_papers/Aljundi2019GradientBS - Gradient Based Sample Selection for Online Continual Learning.pdf:PDF},
+ url = {https://arxiv.org/pdf/1903.08671.pdf},
+}
+
+@Article{chaudhry2019continual,
+ author = {Chaudhry, Arslan and Rohrbach, Marcus and Elhoseiny, Mohamed and Ajanthan, Thalaiyasingam and Dokania, Puneet K and Torr, Philip HS and Ranzato, M},
+ title = {Continual learning with tiny episodic memories},
+ year = {2019},
+ comment = {reservoir sampling with imbalanced data "so that the data distribution in the replay buffer follows the data distribution that has already been seen" (see the reservoir-sampling sketch after this entry)},
+ file = {:life_long_learning_papers/chaudhry2019continual - Continual Learning with Tiny Episodic Memories.pdf:PDF},
+ priority = {prio1},
+ url = {https://ora.ox.ac.uk/objects/uuid:6e7580c4-85c9-4874-a52d-e4184046935c/download_file?file_format=pdf&safe_filename=Continual+Learning+with+Tiny+Episodic+Memories.pdf&type_of_work=Conference item},
+}
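+
+@Comment{Sketch (my note): classic reservoir sampling (Vitter 1985, Algorithm R; see the
+Vitter entry below), which is what makes the buffer distribution follow the distribution
+seen so far, as quoted in the entry above. Names are placeholders.
+
+import random
+
+def reservoir_update(buffer, item, num_seen, capacity):
+    # num_seen: number of stream items observed before this one.
+    # Keep the first `capacity` items; afterwards replace a random slot with probability
+    # capacity / (num_seen + 1), so every stream item is retained with equal probability.
+    if num_seen < capacity:
+        buffer.append(item)
+    else:
+        j = random.randint(0, num_seen)          # uniform over 0..num_seen (inclusive)
+        if j < capacity:
+            buffer[j] = item
+    return buffer
+}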
+
+@InProceedings{isele2018selective,
+ author = {Isele, David and Cosgun, Akansel},
+ booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence},
+ title = {Selective experience replay for lifelong learning},
+ year = {2018},
+ number = {1},
+ volume = {32},
+ comment = {'For example, reservoir sampling has been employed in [5, 8] so that the data distribution in the replay buffer follows the data distribution that has already been seen. The problem of reservoir sampling is that the minor modes in the distribution with small probability mass may fail to be represented in the replay buffer. As a remedy to this problem, coverage maximization is also proposed in [8].'
+
+This paper is reference [8] in the quoted passage; see the small illustration after this entry.},
+ file = {:life_long_learning_papers/isele2018selective-Selective_Experience_Replay_For_Lifelong_Learrning.pdf:PDF},
+ priority = {prio1},
+}
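+
+@Comment{Back-of-the-envelope note (mine) on the "minor modes" problem quoted above:
+under reservoir sampling, the expected number of buffer slots for a class is proportional
+to its frequency in the stream, so rare classes can easily round down to nothing.
+
+def expected_slots(buffer_size, class_frequency):
+    # Expected number of slots a class occupies in a reservoir-sampled buffer.
+    return buffer_size * class_frequency
+
+# e.g. expected_slots(100, 0.005) == 0.5: a class seen in 0.5% of the stream will,
+# more often than not, be entirely absent from a 100-sample buffer.
+}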
+
+@Article{shin2017continual,
+ author = {Shin, Hanul and Lee, Jung Kwon and Kim, Jaehong and Kim, Jiwon},
+ journal = {arXiv preprint arXiv:1705.08690},
+ title = {Continual learning with deep generative replay},
+ year = {2017},
+ readstatus = {skimmed},
+ url = {https://arxiv.org/pdf/1705.08690.pdf},
+}
+
+@Article{li2017learning,
+ author = {Li, Zhizhong and Hoiem, Derek},
+ journal = {IEEE transactions on pattern analysis and machine intelligence},
+ title = {Learning without forgetting},
+ year = {2017},
+ number = {12},
+ pages = {2935--2947},
+ volume = {40},
+ publisher = {IEEE},
+ readstatus = {skimmed},
+ url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=8107520},
+}
+
+@InProceedings{rebuffi2017icarl,
+ author = {Rebuffi, Sylvestre-Alvise and Kolesnikov, Alexander and Sperl, Georg and Lampert, Christoph H},
+ booktitle = {Proceedings of the IEEE conference on Computer Vision and Pattern Recognition},
+ title = {{iCaRL}: Incremental classifier and representation learning},
+ year = {2017},
+ pages = {2001--2010},
+ file = {:life_long_learning_papers/rebuffi2017icarl - Icarl_ Incremental Classifier and Representation Learning.pdf:PDF},
+ url = {https://openaccess.thecvf.com/content_cvpr_2017/papers/Rebuffi_iCaRL_Incremental_Classifier_CVPR_2017_paper.pdf},
+}
+
+@Article{kirkpatrick2017overcoming,
+ author = {Kirkpatrick, James and Pascanu, Razvan and Rabinowitz, Neil and Veness, Joel and Desjardins, Guillaume and Rusu, Andrei A and Milan, Kieran and Quan, John and Ramalho, Tiago and Grabska-Barwinska, Agnieszka and others},
+ journal = {Proceedings of the national academy of sciences},
+ title = {Overcoming catastrophic forgetting in neural networks},
+ year = {2017},
+ number = {13},
+ pages = {3521--3526},
+ volume = {114},
+ comment = {EWC paper},
+ publisher = {National Acad Sciences},
+ readstatus = {skimmed},
+ url = {https://www.pnas.org/content/pnas/114/13/3521.full.pdf},
+}
+
+@InProceedings{chaudhry2018riemannian,
+ author = {Chaudhry, Arslan and Dokania, Puneet K and Ajanthan, Thalaiyasingam and Torr, Philip HS},
+ booktitle = {Proceedings of the European Conference on Computer Vision (ECCV)},
+ title = {Riemannian walk for incremental learning: Understanding forgetting and intransigence},
+ year = {2018},
+ pages = {532--547},
+ file = {:life_long_learning_papers/chaudhry2018riemannian - Riemannian Walk for Incremental Learning_ Understanding Forgetting and Intransigence.pdf:PDF},
+ url = {https://openaccess.thecvf.com/content_ECCV_2018/papers/Arslan_Chaudhry__Riemannian_Walk_ECCV_2018_paper.pdf},
+}
+
+@Article{Pelosin2021MoreIB,
+ author = {Francesco Pelosin and A. Torsello},
+ journal = {ArXiv},
+ title = {More Is Better: An Analysis of Instance Quantity/Quality Trade-off in Rehearsal-based Continual Learning},
+ year = {2021},
+ volume = {abs/2105.14106},
+ file = {:life_long_learning_papers/Pelosin2021MoreIB - More Is Better_ an Analysis of Instance Quantity_Quality Trade off in Rehearsal Based Continual Learning.pdf:PDF},
+ url = {https://arxiv.org/pdf/2105.14106.pdf},
+}
+
+@InProceedings{Knoblauch2020OptimalCL,
+ author = {Jeremias Knoblauch and H. Husain and Tom Diethe},
+ booktitle = {ICML},
+ title = {Optimal Continual Learning has Perfect Memory and is NP-hard},
+ year = {2020},
+ readstatus = {skimmed},
+ url = {https://arxiv.org/pdf/2006.05188.pdf},
+}
+
+@Article{Vitter1985RandomSW,
+ author = {J. Vitter},
+ journal = {ACM Trans. Math. Softw.},
+ title = {Random sampling with a reservoir},
+ year = {1985},
+ pages = {37-57},
+ volume = {11},
+ file = {:Vitter1985RandomSW - Random Sampling with a Reservoir.pdf:PDF},
+ url = {http://www.cs.umd.edu/~samir/498/vitter.pdf},
+}
+
+@Article{Lomonaco2020CVPR2C,
+ author = {V. Lomonaco and Lorenzo Pellegrini and Pau Rodr{\'i}guez and Massimo Caccia and Qi She and Y. Chen and Quentin Jodelet and Ruiping Wang and Zheda Mai and David V{\'a}zquez and G. I. Parisi and Nikhil Churamani and M. Pickett and Issam H. Laradji and D. Maltoni},
+ journal = {ArXiv},
+ title = {CVPR 2020 Continual Learning in Computer Vision Competition: Approaches, Results, Current Challenges and Future Directions},
+ year = {2020},
+ volume = {abs/2009.09929},
+ file = {:life_long_learning_papers/Lomonaco2020CVPR2C - CVPR 2020 Continual Learning in Computer Vision Competition_ Approaches, Results, Current Challenges and Future Directions.pdf:PDF},
+ url = {https://arxiv.org/pdf/2009.09929.pdf},
+}
+
+@Article{Pellegrini2020LatentRF,
+ author = {Lorenzo Pellegrini and Gabriele Graffieti and V. Lomonaco and D. Maltoni},
+ journal = {2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
+ title = {Latent Replay for Real-Time Continual Learning},
+ year = {2020},
+ pages = {10203-10209},
+ file = {:life_long_learning_papers/Pellegrini2020LatentRF - Latent Replay for Real Time Continual Learning.pdf:PDF},
+ readstatus = {skimmed},
+ url = {https://arxiv.org/pdf/1912.01100.pdf},
+}
+
+@Article{Lomonaco2020RehearsalFreeCL,
+ author = {V. Lomonaco and D. Maltoni and Lorenzo Pellegrini},
+ journal = {2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)},
+ title = {Rehearsal-Free Continual Learning over Small Non-I.I.D. Batches},
+ year = {2020},
+ pages = {989-998},
+ file = {:life_long_learning_papers/Lomonaco2020RehearsalFreeCL - Rehearsal Free Continual Learning Over Small Non I.I.D. Batches.pdf:PDF},
+ url = {https://arxiv.org/pdf/1907.03799.pdf},
+}
+
+@Article{Maltoni2019ContinuousLI,
+ author = {D. Maltoni and V. Lomonaco},
+ journal = {Neural networks : the official journal of the International Neural Network Society},
+ title = {Continuous Learning in Single-Incremental-Task Scenarios},
+ year = {2019},
+ pages = {56-73},
+ volume = {116},
+ file = {:life_long_learning_papers/Maltoni2019ContinuousLI - Continuous Learning in Single Incremental Task Scenarios.pdf:PDF},
+ url = {https://arxiv.org/pdf/1806.08568.pdf},
+}
+
+@InProceedings{zenke2017continual,
+ author = {Zenke, Friedemann and Poole, Ben and Ganguli, Surya},
+ booktitle = {International Conference on Machine Learning},
+ title = {Continual learning through synaptic intelligence},
+ year = {2017},
+ organization = {PMLR},
+ pages = {3987--3995},
+ url = {http://proceedings.mlr.press/v70/zenke17a/zenke17a.pdf},
+}
+
+@Article{Amalapuram2021OnHC,
+ author = {Suresh Kumar Amalapuram and Thushara Tippi Reddy and Sumohana S. Channappayya and Tamma Bheemarjuna Reddy},
+ journal = {The First International Conference on AI-ML-Systems},
+ title = {On Handling Class Imbalance in Continual Learning based Network Intrusion Detection Systems},
+ year = {2021},
+ url = {https://dl.acm.org/doi/pdf/10.1145/3486001.3486231},
+}
+
+@Comment{jabref-meta: databaseType:bibtex;}