@article{kersting2023comparable,
  author      = {Kersting, Joschka and Maoro, Falk and Geierhos, Michaela},
  title       = {Towards Comparable Ratings: Exploring Bias in {German} Physician Reviews},
  journal     = {Data \& Knowledge Engineering},
  year        = {2023},
  volume      = {148},
  pages       = {102235},
  doi         = {10.1016/j.datak.2023.102235},
  keywords    = {Language model fairness, Aspect phrase classification, Grade prediction, Physician reviews},
  abstract    = {In this study, we evaluate the impact of gender-biased data from German-language physician reviews on the fairness of fine-tuned language models. For two different downstream tasks, we use data reported to be gender biased and aggregate it with annotations. First, we propose a new approach to aspect-based sentiment analysis that allows identifying, extracting, and classifying implicit and explicit aspect phrases and their polarity within a single model. The second task we present is grade prediction, where we predict the overall grade of a review on the basis of the review text. For both tasks, we train numerous transformer models and evaluate their performance. The aggregation of sensitive attributes, such as a physician's gender and migration background, with individual text reviews allows us to measure the performance of the models with respect to these sensitive groups. These group-wise performance measures act as extrinsic bias measures for our downstream tasks. In addition, we translate several gender-specific templates of the intrinsic bias metrics into the German language and evaluate our fine-tuned models. Based on this set of tasks, fine-tuned models, and intrinsic and extrinsic bias measures, we perform correlation analyses between intrinsic and extrinsic bias measures. In terms of sensitive groups and effect sizes, our bias measure results show different directions. Furthermore, correlations between measures of intrinsic and extrinsic bias can be observed in different directions. This leads us to conclude that gender-biased data does not inherently lead to biased models. Other variables, such as template dependency for intrinsic measures and label distribution in the data, must be taken into account as they strongly influence the metric results. Therefore, we suggest that metrics and templates should be chosen according to the given task and the biases to be assessed.},
  institution = {Universit{\"a}t der Bundeswehr M{\"u}nchen, Fakult{\"a}t f{\"u}r Informatik, INF 7 - Institut f{\"u}r Datensicherheit, Professur: Geierhos, Michaela},
}