@article{fdi:010085566,
  title    = {The need for interpretable features: motivation and taxonomy},
  author   = {Zytek, A. and Arnaldo, I. and Liu, D. and Berti-Equille, Laure and Veeramachaneni, K.},
  journal  = {ACM SIGKDD Explorations Newsletter},
  volume   = {24},
  number   = {1},
  pages    = {1--13},
  year     = {2022},
  ISSN     = {1931-0145},
  DOI      = {10.1145/3544903.3544905},
  URL      = {https://www.documentation.ird.fr/hor/fdi:010085566},
  language = {ENG},
  abstract = {Through extensive experience developing and explaining machine learning (ML) applications for real-world domains, we have learned that ML models are only as interpretable as their features. Even simple, highly interpretable model types such as regression models can be difficult or impossible to understand if they use uninterpretable features. Different users, especially those using ML models for decision-making in their domains, may require different levels and types of feature interpretability. Furthermore, based on our experiences, we claim that the term 'interpretable feature' is not specific nor detailed enough to capture the full extent to which features impact the usefulness of ML explanations. In this paper, we motivate and discuss three key lessons: 1) more attention should be given to what we refer to as the interpretable feature space, or the state of features that are useful to domain experts taking real-world actions, 2) a formal taxonomy is needed of the feature properties that may be required by these domain experts (we propose a partial taxonomy in this paper), and 3) transforms that take data from the model-ready state to an interpretable form are just as essential as traditional ML transforms that prepare features for the model.},
}