@article{uhlig2023dlam,
  author      = {Uhlig, Frieder and Struppek, Lukas and Hintersdorf, Dominik and G{\"o}bel, Thomas and Baier, Harald and Kersting, Kristian},
  title       = {Combining {AI} and {AM} -- Improving Approximate Matching through Transformer Networks},
  journal     = {Forensic Science International: Digital Investigation},
  year        = {2023},
  volume      = {45},
  number      = {Supplement},
  pages       = {301570},
  doi         = {10.1016/j.fsidi.2023.301570},
  keywords    = {Deep learning, Approximate matching, DLAM, Fuzzy hashes, Transformer, Artificial intelligence},
  abstract    = {Approximate matching is a well-known concept in digital forensics to determine the similarity between digital artifacts. An important use case of approximate matching is the reliable and efficient detection of case-relevant data structures on a blacklist (e.g., malware or corporate secrets), if only fragments of the original are available. For instance, if only a cluster of indexed malware is still present during the digital forensic investigation, the approximate matching algorithm shall be able to assign the fragment to the blacklisted malware. However, traditional approximate matching functions like TLSH and ssdeep fail to detect files based on their fragments if the presented piece is relatively small compared to the overall file size (e.g., like one-third of the total file). A second well-known issue with traditional approximate matching algorithms is the lack of scaling due to the ever-increasing lookup databases. In this paper, we propose an improved matching algorithm based on transformer-based models from the field of natural language processing. We call our approach Deep Learning Approximate Matching (DLAM). As a concept from artificial intelligence, DLAM gets knowledge of characteristic blacklisted patterns during its training phase. Then DLAM is able to detect the patterns in a typically much larger file, that is DLAM focuses on the use case of fragment detection. Our evaluation is inspired by two widespread blacklist use cases: the detection of malware (e.g., in JavaScript) and corporate secrets (e.g., pdf or office documents). We reveal that DLAM has three key advantages compared to the prominent conventional approaches TLSH and ssdeep. First, it makes the tedious extraction of known to be bad parts obsolete, which is necessary until now before any search for them with approximate matching algorithms. This allows efficient classification of files on a much larger scale, which is important due to exponentially increasing data to be investigated. Second, depending on the use case, DLAM achieves a similar (in case of mrsh-cf and mrsh-v2) or even significantly higher accuracy (in case of ssdeep and TLSH) in recovering fragments of blacklisted files. For instance, in the case of JavaScript files, our assessment shows that DLAM provides an accuracy of 93% on our test corpus, while TLSH and ssdeep show a classification accuracy of only 50%. Third, we show that DLAM enables the detection of file correlations in the output of TLSH and ssdeep even for fragment sizes, where the respective matching function of TLSH and ssdeep fails.},
  note        = {DFRWS 2023 USA -- Proceedings of the Twenty Third Annual DFRWS Conference},
  institution = {Universit{\"a}t der Bundeswehr M{\"u}nchen, Fakult{\"a}t f{\"u}r Informatik, INF 6 - Institut f{\"u}r Systemsicherheit, Professur: Baier, Harald},
}