Publications
2025
- arXiv: Beneath the Surface: How Large Language Models Reflect Hidden Bias. Jinhao Pan, Chahat Raj, Ziyu Yao, and Ziwei Zhu. arXiv preprint arXiv:2502.19749, 2025.
@misc{pan2025beneathsurfacelargelanguage,
  title         = {Beneath the Surface: How Large Language Models Reflect Hidden Bias},
  author        = {Pan, Jinhao and Raj, Chahat and Yao, Ziyu and Zhu, Ziwei},
  year          = {2025},
  eprint        = {2502.19749},
  archiveprefix = {arXiv},
  primaryclass  = {cs.CL},
  url           = {https://arxiv.org/abs/2502.19749},
}
- WSDM’25: Combating Heterogeneous Model Biases in Recommendations via Boosting. Jinhao Pan, James Caverlee, and Ziwei Zhu. In Proceedings of the Eighteenth ACM International Conference on Web Search and Data Mining, 2025.
Collaborative Filtering (CF) based recommenders often exhibit model biases, delivering strong recommendation utility to certain users or items at the expense of others. Prior research treats these biases as isolated, standalone issues and develops a separate method for each, ignoring their interconnected nature and thereby limiting the effectiveness of specialized debiasing efforts. Thus, we introduce a boosting-based framework designed to alleviate a broad spectrum of biases. This framework employs a series of sub-models, each tailored to different user and item subgroups. Theoretically, our model ensures an exponentially decreasing upper bound on the training loss across all user and item types as boosting iterations increase. Extensive experiments demonstrate its superior debiasing capabilities against state-of-the-art methods across four model bias types. Appendix, data, and code are available at https://github.com/JP-25/CFBoost.
@inproceedings{pan2025combating,
  title     = {Combating Heterogeneous Model Biases in Recommendations via Boosting},
  author    = {Pan, Jinhao and Caverlee, James and Zhu, Ziwei},
  booktitle = {Proceedings of the Eighteenth ACM International Conference on Web Search and Data Mining},
  pages     = {222--231},
  year      = {2025},
  doi       = {10.1145/3701551.3703505},
}
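To make the boosting idea in the abstract concrete, here is a minimal, self-contained sketch of subgroup-aware boosting for collaborative filtering. It is an illustration only: the trivial weighted-mean sub-model, the error threshold, and the AdaBoost-style reweighting and vote combination are assumptions, not the authors' CFBoost implementation (see the linked repository for that). The exponentially decreasing training-loss bound mentioned in the abstract is analogous in flavor to the classic AdaBoost bound, where training error is bounded by $\prod_t 2\sqrt{\epsilon_t(1-\epsilon_t)}$ over boosting rounds $t$.

```python
# Hypothetical sketch of subgroup-aware boosting for a CF recommender.
# NOT the authors' CFBoost code; the sub-model and reweighting rule are
# illustrative assumptions in an AdaBoost-like template.
import numpy as np

rng = np.random.default_rng(0)

def train_submodel(R, w):
    """Fit a trivial weighted baseline: the weighted mean of observed
    ratings. A real sub-model would be a full CF model (e.g. matrix
    factorization) trained on the weighted interactions."""
    mu = (w * R).sum() / w.sum()      # w is already zero off-support
    return lambda: np.full(R.shape, mu)

def boost_cf(R, n_rounds=5):
    """AdaBoost-flavored loop: upweight interactions the current
    sub-models still predict poorly, so later rounds focus on
    under-served users and items."""
    mask = (R > 0).astype(float)
    w = mask / mask.sum()             # uniform weights over observed entries
    preds, alphas = [], []
    for _ in range(n_rounds):
        P = train_submodel(R, w)()
        err = np.abs(R - P) * mask    # per-interaction absolute error
        thresh = err[mask > 0].mean()
        eps = np.clip((w * (err > thresh)).sum(), 1e-6, 1 - 1e-6)
        alpha = 0.5 * np.log((1 - eps) / eps)   # sub-model vote weight
        # exponential reweighting: poorly served entries gain weight
        w = w * np.exp(alpha * np.sign(err - thresh)) * mask
        w /= w.sum()
        preds.append(P)
        alphas.append(alpha)
    a = np.array(alphas)
    a = a / a.sum()
    return sum(ai * Pi for ai, Pi in zip(a, preds))

# toy 8x10 rating matrix with 0 marking unobserved entries
R = rng.integers(0, 6, size=(8, 10)).astype(float)
print(boost_cf(R).round(2))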
2024
- ECIR’24: Countering Mainstream Bias via End-to-End Adaptive Local Learning. Jinhao Pan, Ziwei Zhu, Jianling Wang, Allen Lin, and James Caverlee. In European Conference on Information Retrieval, 2024.
Collaborative filtering (CF) based recommendations suffer from mainstream bias – where mainstream users are favored over niche users, leading to poor recommendation quality for many long-tail users. In this paper, we identify two root causes of this mainstream bias: (i) discrepancy modeling, whereby CF algorithms focus on modeling mainstream users while neglecting niche users with unique preferences; and (ii) unsynchronized learning, where niche users require more training epochs than mainstream users to reach peak performance. Targeting these causes, we propose a novel end-To-end Adaptive Local Learning (TALL) framework to provide high-quality recommendations to both mainstream and niche users. TALL uses a loss-driven Mixture-of-Experts module to adaptively ensemble experts to provide customized local models for different users. Further, it contains an adaptive weight module to synchronize the learning paces of different users by dynamically adjusting weights in the loss. Extensive experiments demonstrate the state-of-the-art performance of the proposed model. Code and data are provided at https://github.com/JP-25/end-To-end-Adaptive-Local-Leanring-TALL-.
@inproceedings{pan2024countering,
  title        = {Countering Mainstream Bias via End-to-End Adaptive Local Learning},
  author       = {Pan, Jinhao and Zhu, Ziwei and Wang, Jianling and Lin, Allen and Caverlee, James},
  booktitle    = {European Conference on Information Retrieval},
  pages        = {75--89},
  year         = {2024},
  organization = {Springer},
  isbn         = {978-3-031-56069-9},
}
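Below is a minimal PyTorch sketch of the two mechanisms the abstract describes: a loss-driven Mixture-of-Experts that routes each user to the experts serving them best, and adaptive per-sample loss weights that let slower-learning (niche) users catch up. Everything here, the MF experts, the softmax-over-negative-loss gate, the EMA update, and the weighting rule, is an illustrative assumption and not the authors' TALL code; see the linked repository for the real implementation.

```python
# Hedged sketch of loss-driven MoE + adaptive loss weights for CF.
# All module designs and hyperparameters below are assumptions.
import torch
import torch.nn as nn

class MFExpert(nn.Module):
    """One local model: plain matrix factorization."""
    def __init__(self, n_users, n_items, dim=16):
        super().__init__()
        self.U = nn.Embedding(n_users, dim)
        self.V = nn.Embedding(n_items, dim)

    def forward(self, u, i):
        return (self.U(u) * self.V(i)).sum(-1)

class LossDrivenMoE(nn.Module):
    """Gate softmaxes the *negative* running per-user loss of each expert,
    so each user is routed toward the experts that serve them best."""
    def __init__(self, n_users, n_items, n_experts=4):
        super().__init__()
        self.experts = nn.ModuleList(
            [MFExpert(n_users, n_items) for _ in range(n_experts)])
        # running per-user, per-expert loss; a buffer, not a parameter
        self.register_buffer("user_loss", torch.zeros(n_users, n_experts))

    def forward(self, u, i):
        expert_preds = torch.stack([e(u, i) for e in self.experts], -1)  # (B, E)
        gate = torch.softmax(-self.user_loss[u], dim=-1)                 # (B, E)
        return (gate * expert_preds).sum(-1), expert_preds

def adaptive_weights(losses, tau=1.0):
    """Upweight samples whose loss is still high (a proxy for niche users
    that learn more slowly), keeping the mean weight near 1."""
    w = torch.softmax(losses / tau, dim=0)
    return w * len(w)

# --- one toy training step on random data ---
n_users, n_items, B = 50, 100, 256
model = LossDrivenMoE(n_users, n_items)
opt = torch.optim.Adam(model.parameters(), lr=1e-2)
u = torch.randint(0, n_users, (B,))
i = torch.randint(0, n_items, (B,))
y = torch.rand(B)

pred, expert_preds = model(u, i)
per_sample = (pred - y) ** 2
loss = (adaptive_weights(per_sample.detach()) * per_sample).mean()
opt.zero_grad(); loss.backward(); opt.step()

# refresh the gate's running losses with an exponential moving average
with torch.no_grad():
    per_expert = (expert_preds - y.unsqueeze(-1)) ** 2                   # (B, E)
    model.user_loss[u] = 0.9 * model.user_loss[u] + 0.1 * per_expert
print(float(loss))
```

The gate here is per-user state rather than a learned network; the paper's loss-driven gating and adaptive weight module are more elaborate, but the sketch shows why routing by recent loss can yield a customized local model per user while the loss weights synchronize learning paces.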