@inproceedings{fdi:010089979,
  title    = {Faithful Vision-Language Interpretation via Concept Bottleneck Models},
  author   = {Lai, S. and Hu, L. and Wang, J. and Berti-Equille, Laure and Wang, D.},
  language = {ENG},
  abstract = {The demand for transparency in healthcare and finance has led to interpretable machine learning (IML) models, notably concept bottleneck models (CBMs), which are valued for their predictive performance and the insight they provide into deep neural networks. However, the reliance of CBMs on manually annotated data poses challenges. Label-free CBMs have emerged to address this, but they remain unstable, which undermines their faithfulness as explanatory tools. To address this inherent instability, we introduce a formal definition of an alternative concept, the Faithful Vision-Language Concept (FVLC) model, and present a methodology for constructing an FVLC that satisfies four critical properties. Our extensive experiments on four benchmark datasets using Label-free CBM architectures demonstrate that our FVLC outperforms the other baselines in stability against input and concept-set perturbations. Our approach incurs minimal accuracy degradation compared to the vanilla CBM, making it a promising solution for reliable and faithful model interpretation.},
  pages    = {24 multigr.},
  booktitle = {},
  year     = {2024},
  URL      = {https://www.documentation.ird.fr/hor/fdi:010089979},
}
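
% The abstract refers to concept bottleneck models, in which the label prediction is routed
% through a layer of human-interpretable concept activations. The sketch below is not taken
% from the cited paper and does not implement its FVLC construction or the Label-free CBM;
% it is a minimal, hypothetical PyTorch illustration of a generic concept bottleneck
% architecture (input -> concept scores -> label), with all class and parameter names
% (ConceptBottleneckModel, feat_dim, n_concepts, n_classes) chosen purely for illustration.

import torch
import torch.nn as nn

class ConceptBottleneckModel(nn.Module):
    def __init__(self, backbone: nn.Module, feat_dim: int, n_concepts: int, n_classes: int):
        super().__init__()
        self.backbone = backbone                              # any feature extractor
        self.concept_head = nn.Linear(feat_dim, n_concepts)   # predicts interpretable concepts
        self.label_head = nn.Linear(n_concepts, n_classes)    # final prediction sees concepts only

    def forward(self, x):
        feats = self.backbone(x)
        concepts = torch.sigmoid(self.concept_head(feats))    # concept activations in [0, 1]
        logits = self.label_head(concepts)                    # label depends only on the bottleneck
        return concepts, logits

# Hypothetical usage with a toy backbone on random 32x32 RGB inputs:
toy_backbone = nn.Flatten()
model = ConceptBottleneckModel(toy_backbone, feat_dim=3 * 32 * 32, n_concepts=20, n_classes=10)
concepts, logits = model(torch.randn(4, 3, 32, 32))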