@inproceedings{wang2024feature,
  author      = {Wang, Weixing and Yang, Haojin and Meinel, Christoph and Özkan, Yagiz Hasan and Bermudez Serna, Cristian and Mas-Machuca, Carmen},
  title       = {Feature Distribution Shift Mitigation with Contrastive Pretraining for Intrusion Detection},
  booktitle   = {Network Traffic Measurement and Analysis Conference (TMA)},
  publisher   = {IEEE},
  year        = {2024},
  abstract    = {In recent years, there has been growing interest in using Machine Learning (ML), especially Deep Learning (DL), to solve Network Intrusion Detection (NID) problems. However, the feature distribution shift problem remains a difficulty, because changes in feature distributions over time negatively impact the model's performance. As one promising solution, model pretraining has emerged as a novel training paradigm that brings robustness against feature distribution shift and has proven successful in Computer Vision (CV) and Natural Language Processing (NLP). To verify whether this paradigm is beneficial for the NID problem, we propose SwapCon, an ML model in the context of NID, which compresses shift-invariant feature information during the pretraining stage and refines it during the fine-tuning stage. We exemplify the evidence of feature distribution shift using the Kyoto2006+ dataset. We demonstrate how pretraining a model of the proper size can increase robustness against feature distribution shifts by over 8%. Moreover, we show how an adequate numerical embedding strategy also enhances the performance of pretrained models. Further experiments show that the proposed SwapCon model also outperforms eXtreme Gradient Boosting (XGBoost) and K-Nearest Neighbor (KNN) based models by a large margin.},
  institution = {Universität der Bundeswehr München, Faculty of Electrical Engineering and Information Technology, EIT 3 - Institute for Information Technology, Chair: Mas-Machuca, Carmen},
}