2023 |
Ceschin, Fabrício; Botacin, Marcus; Bifet, Albert; Pfahringer, Bernhard; Oliveira, Luiz S; Gomes, Heitor Murilo; Grégio, André Machine Learning (In) Security: A Stream of Problems Journal Article Digital Threats, 2023, ISSN: 2692-1626, (Just Accepted). Abstract | Links | BibTeX | Tags: cybersecurity, Data streams, Machine learning @article{10.1145/3617897, title = {Machine Learning (In) Security: A Stream of Problems}, author = {Fabrício Ceschin and Marcus Botacin and Albert Bifet and Bernhard Pfahringer and Luiz S Oliveira and Heitor Murilo Gomes and André Grégio}, url = {https://doi.org/10.1145/3617897 https://secret.inf.ufpr.br/papers/fabricio_mlinsec_dtrap.pdf}, doi = {10.1145/3617897}, issn = {2692-1626}, year = {2023}, date = {2023-09-01}, journal = {Digital Threats}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, abstract = {Machine Learning (ML) has been widely applied to cybersecurity and is considered state-of-the-art for solving many of the open issues in that field. However, it is very difficult to evaluate how good the produced solutions are, since the challenges faced in security may not appear in other areas. One of these challenges is the concept drift, which increases the existing arms race between attackers and defenders: malicious actors can always create novel threats to overcome the defense solutions, which may not consider them in some approaches. Due to this, it is essential to know how to properly build and evaluate an ML-based security solution. In this paper, we identify, detail, and discuss the main challenges in the correct application of ML techniques to cybersecurity data. We evaluate how concept drift, evolution, delayed labels, and adversarial ML impact the existing solutions. Moreover, we address how issues related to data collection affect the quality of the results presented in the security literature, showing that new strategies are needed to improve current solutions. 
Finally, we present how existing solutions may fail under certain circumstances, and propose mitigations to them, presenting a novel checklist to help the development of future ML solutions for cybersecurity.}, note = {Just Accepted}, keywords = {cybersecurity, Data streams, Machine learning}, pubstate = {published}, tppubtype = {article} } Machine Learning (ML) has been widely applied to cybersecurity and is considered state-of-the-art for solving many of the open issues in that field. However, it is very difficult to evaluate how good the produced solutions are, since the challenges faced in security may not appear in other areas. One of these challenges is the concept drift, which increases the existing arms race between attackers and defenders: malicious actors can always create novel threats to overcome the defense solutions, which may not consider them in some approaches. Due to this, it is essential to know how to properly build and evaluate an ML-based security solution. In this paper, we identify, detail, and discuss the main challenges in the correct application of ML techniques to cybersecurity data. We evaluate how concept drift, evolution, delayed labels, and adversarial ML impact the existing solutions. Moreover, we address how issues related to data collection affect the quality of the results presented in the security literature, showing that new strategies are needed to improve current solutions. Finally, we present how existing solutions may fail under certain circumstances, and propose mitigations to them, presenting a novel checklist to help the development of future ML solutions for cybersecurity. |
2022 |
Ceschin, Fabrício; Botacin, Marcus; Gomes, Heitor Murilo; Pinagé, Felipe; Oliveira, Luiz S; Grégio, André Fast & Furious: On the modelling of malware detection as an evolving data stream Journal Article Expert Systems with Applications, pp. 118590, 2022, ISSN: 0957-4174. Abstract | Links | BibTeX | Tags: Android, Concept drift, Data streams, Machine learning, malware detection @article{CESCHIN2022118590, title = {Fast \& Furious: On the modelling of malware detection as an evolving data stream}, author = {Fabrício Ceschin and Marcus Botacin and Heitor Murilo Gomes and Felipe Pinagé and Luiz S Oliveira and André Grégio}, url = {https://www.sciencedirect.com/science/article/pii/S0957417422016463 https://secret.inf.ufpr.br/papers/fabricio_eswa_22.pdf}, doi = {10.1016/j.eswa.2022.118590}, issn = {0957-4174}, year = {2022}, date = {2022-08-22}, journal = {Expert Systems with Applications}, pages = {118590}, abstract = {Malware is a major threat to computer systems and imposes many challenges to cyber security. Targeted threats, such as ransomware, cause millions of dollars in losses every year. The constant increase of malware infections has been motivating popular antiviruses (AVs) to develop dedicated detection strategies, which include meticulously crafted machine learning (ML) pipelines. However, malware developers unceasingly change their samples’ features to bypass detection. This constant evolution of malware samples causes changes to the data distribution (i.e., concept drifts) that directly affect ML model detection rates, something not considered in the majority of the literature work. In this work, we evaluate the impact of concept drift on malware classifiers for two Android datasets: DREBIN (≈130K apps) and a subset of AndroZoo (≈285K apps). We used these datasets to train an Adaptive Random Forest (ARF) classifier, as well as a Stochastic Gradient Descent (SGD) classifier. 
We also ordered all datasets samples using their VirusTotal submission timestamp and then extracted features from their textual attributes using two algorithms (Word2Vec and TF-IDF). Then, we conducted experiments comparing both feature extractors, classifiers, as well as four drift detectors (Drift Detection Method, Early Drift Detection Method, ADaptive WINdowing, and Kolmogorov–Smirnov WINdowing) to determine the best approach for real environments. Finally, we compare some possible approaches to mitigate concept drift and propose a novel data stream pipeline that updates both the classifier and the feature extractor. To do so, we conducted a longitudinal evaluation by (i) classifying malware samples collected over nine years (2009–2018), (ii) reviewing concept drift detection algorithms to attest its pervasiveness, (iii) comparing distinct ML approaches to mitigate the issue, and (iv) proposing an ML data stream pipeline that outperformed literature approaches, achieving an improvement of 22.05 percentage points of F1Score in the DREBIN dataset, and 8.77 in the AndroZoo dataset.}, keywords = {Android, Concept drift, Data streams, Machine learning, malware detection}, pubstate = {published}, tppubtype = {article} } Malware is a major threat to computer systems and imposes many challenges to cyber security. Targeted threats, such as ransomware, cause millions of dollars in losses every year. The constant increase of malware infections has been motivating popular antiviruses (AVs) to develop dedicated detection strategies, which include meticulously crafted machine learning (ML) pipelines. However, malware developers unceasingly change their samples’ features to bypass detection. This constant evolution of malware samples causes changes to the data distribution (i.e., concept drifts) that directly affect ML model detection rates, something not considered in the majority of the literature work. 
In this work, we evaluate the impact of concept drift on malware classifiers for two Android datasets: DREBIN (≈130K apps) and a subset of AndroZoo (≈285K apps). We used these datasets to train an Adaptive Random Forest (ARF) classifier, as well as a Stochastic Gradient Descent (SGD) classifier. We also ordered all datasets samples using their VirusTotal submission timestamp and then extracted features from their textual attributes using two algorithms (Word2Vec and TF-IDF). Then, we conducted experiments comparing both feature extractors, classifiers, as well as four drift detectors (Drift Detection Method, Early Drift Detection Method, ADaptive WINdowing, and Kolmogorov–Smirnov WINdowing) to determine the best approach for real environments. Finally, we compare some possible approaches to mitigate concept drift and propose a novel data stream pipeline that updates both the classifier and the feature extractor. To do so, we conducted a longitudinal evaluation by (i) classifying malware samples collected over nine years (2009–2018), (ii) reviewing concept drift detection algorithms to attest its pervasiveness, (iii) comparing distinct ML approaches to mitigate the issue, and (iv) proposing an ML data stream pipeline that outperformed literature approaches, achieving an improvement of 22.05 percentage points of F1Score in the DREBIN dataset, and 8.77 in the AndroZoo dataset. |
2018 |
Ceschin, Fabrício; Pinagé, Felipe; Castilho, Marcos; Menotti, David; Oliveira, Luiz S; Grégio, André The Need for Speed: An Analysis of Brazilian Malware Classifiers Journal Article IEEE Security Privacy, 16 (6), pp. 31-41, 2018, ISSN: 1540-7993. Abstract | Links | BibTeX | Tags: Brazilian malware classifiers, Feature extraction, invasive software, learning (artificial intelligence), Machine learning, machine-learning systems, malware, malware classification, pattern classification, security, Security of data, Support vector machines @article{8636415, title = {The Need for Speed: An Analysis of Brazilian Malware Classifiers}, author = {Fabrício Ceschin and Felipe Pinagé and Marcos Castilho and David Menotti and Luiz S Oliveira and André Grégio}, url = {https://secret.inf.ufpr.br/papers/fabricio_needforspeed.pdf}, doi = {10.1109/MSEC.2018.2875369}, issn = {1540-7993}, year = {2018}, date = {2018-11-01}, journal = {IEEE Security \& Privacy}, volume = {16}, number = {6}, pages = {31--41}, abstract = {Using a dataset containing about 50,000 samples from Brazilian cyberspace, we show that relying solely on conventional machine-learning systems without taking into account the change of the subject's concept decreases the performance of classification, emphasizing the need to update the decision model immediately after concept drift occurs.}, keywords = {Brazilian malware classifiers, Feature extraction, invasive software, learning (artificial intelligence), Machine learning, machine-learning systems, malware, malware classification, pattern classification, security, Security of data, Support vector machines}, pubstate = {published}, tppubtype = {article} } Using a dataset containing about 50,000 samples from Brazilian cyberspace, we show that relying solely on conventional machine-learning systems without taking into account the change of the subject's concept decreases the performance of classification, emphasizing the need to update the decision model immediately after 
concept drift occurs. |
2023 |
Machine Learning (In) Security: A Stream of Problems Journal Article Digital Threats, 2023, ISSN: 2692-1626, (Just Accepted). |
2022 |
Fast & Furious: On the modelling of malware detection as an evolving data stream Journal Article Expert Systems with Applications, pp. 118590, 2022, ISSN: 0957-4174. |
2018 |
The Need for Speed: An Analysis of Brazilian Malware Classifiers Journal Article IEEE Security Privacy, 16 (6), pp. 31-41, 2018, ISSN: 1540-7993. |