
@misc{zhang2021adversarial, title={Adversarial Robustness through the Lens of Causality}, author={Yonggang Zhang and Mingming Gong and Tongliang Liu and Gang Niu and Xinmei Tian and Bo Han and Bernhard Schölkopf and Kun Zhang}, year={2021}, eprint={2106.06196}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@misc{tang2021adversarial, title={Adversarial Visual Robustness by Causal Intervention}, author={Kaihua Tang and Mingyuan Tao and Hanwang Zhang}, year={2021}, eprint={2106.09534}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{niu2021counterfactual, title={Counterfactual VQA: A Cause-Effect Look at Language Bias}, author={Yulei Niu and Kaihua Tang and Hanwang Zhang and Zhiwu Lu and Xian-Sheng Hua and Ji-Rong Wen}, year={2021}, eprint={2006.04315}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@inproceedings{ijcai2021-182, title = {What If We Could Not See? Counterfactual Analysis for Egocentric Action Anticipation}, author = {Zhang, Tianyu and Min, Weiqing and Yang, Jiahao and Liu, Tao and Jiang, Shuqiang and Rui, Yong}, booktitle = {Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence, {IJCAI-21}}, publisher = {International Joint Conferences on Artificial Intelligence Organization}, editor = {Zhi-Hua Zhou}, pages = {1316--1322}, year = {2021}, month = {8}, note = {Main Track}, doi = {10.24963/ijcai.2021/182}, url = {https://doi.org/10.24963/ijcai.2021/182} }

@misc{yue2020interventional, title={Interventional Few-Shot Learning}, author={Zhongqi Yue and Hanwang Zhang and Qianru Sun and Xian-Sheng Hua}, year={2020}, eprint={2009.13000}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@inproceedings{9156448, author={Abbasnejad, Ehsan and Teney, Damien and Parvaneh, Amin and Shi, Javen and van den Hengel, Anton}, booktitle={2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, title={Counterfactual Vision and Language Learning}, year={2020}, pages={10041-10051}, doi={10.1109/CVPR42600.2020.01006} }

@misc{tang2020unbiased, title={Unbiased Scene Graph Generation from Biased Training}, author={Kaihua Tang and Yulei Niu and Jianqiang Huang and Jiaxin Shi and Hanwang Zhang}, year={2020}, eprint={2002.11949}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{hvilshoj2021ecinn, title={ECINN: Efficient Counterfactuals from Invertible Neural Networks}, author={Frederik Hvilshøj and Alexandros Iosifidis and Ira Assent}, year={2021}, eprint={2103.13701}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@misc{reddy2021causally, title={On Causally Disentangled Representations}, author={Abbavaram Gowtham Reddy and Benin Godfrey L and Vineeth N Balasubramanian}, year={2021}, eprint={2112.05746}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@article{9656623, author={Qin, Wei and Zhang, Hanwang and Hong, Richang and Lim, Ee-Peng and Sun, Qianru}, journal={IEEE Transactions on Multimedia}, title={Causal Interventional Training for Image Recognition}, year={2021}, pages={1-1}, doi={10.1109/TMM.2021.3136717} }

@misc{shao2021improving, title={Improving Weakly-supervised Object Localization via Causal Intervention}, author={Feifei Shao and Yawei Luo and Li Zhang and Lu Ye and Siliang Tang and Yi Yang and Jun Xiao}, year={2021}, eprint={2104.10351}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{sun2021latent, title={Latent Causal Invariant Model}, author={Xinwei Sun and Botong Wu and Xiangyu Zheng and Chang Liu and Wei Chen and Tao Qin and Tie-yan Liu}, year={2021}, eprint={2011.02203}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@misc{qi2020causal, title={Two Causal Principles for Improving Visual Dialog}, author={Jiaxin Qi and Yulei Niu and Jianqiang Huang and Hanwang Zhang}, year={2020}, eprint={1911.10496}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{wang2021weaklysupervised, title={Weakly-Supervised Video Object Grounding via Causal Intervention}, author={Wei Wang and Junyu Gao and Changsheng Xu}, year={2021}, eprint={2112.00475}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{chen2021unbiased, title={Towards Unbiased Visual Emotion Recognition via Causal Intervention}, author={Yuedong Chen and Xu Yang and Tat-Jen Cham and Jianfei Cai}, year={2021}, eprint={2107.12096}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{chen2021human, title={Human Trajectory Prediction via Counterfactual Analysis}, author={Guangyi Chen and Junlong Li and Jiwen Lu and Jie Zhou}, year={2021}, eprint={2107.14202}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{nan2021interventional, title={Interventional Video Grounding with Dual Contrastive Learning}, author={Guoshun Nan and Rui Qiao and Yao Xiao and Jun Liu and Sicong Leng and Hao Zhang and Wei Lu}, year={2021}, eprint={2106.11013}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{jiang2021tsmobn, title={TsmoBN: Generalization for Unseen Clients in Federated Learning via Causal Intervention}, author={Meirui Jiang and Xiaofei Zhang and Michael Kamp and Xiaoxiao Li and Qi Dou}, year={2021}, eprint={2110.09974}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@misc{yuan2021learning, title={Learning Domain-Invariant Relationship with Instrumental Variable for Domain Generalization}, author={Junkun Yuan and Xu Ma and Kun Kuang and Ruoxuan Xiong and Mingming Gong and Lanfen Lin}, year={2021}, eprint={2110.01438}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@misc{liu2021blessings, title={The Blessings of Unlabeled Background in Untrimmed Videos}, author={Yuan Liu and Jingyuan Chen and Zhenfang Chen and Bing Deng and Jianqiang Huang and Hanwang Zhang}, year={2021}, eprint={2103.13183}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{mahajan2021domain, title={Domain Generalization using Causal Matching}, author={Divyat Mahajan and Shruti Tople and Amit Sharma}, year={2021}, eprint={2006.07500}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@inproceedings{10.1145/3474085.3475472, author = {Sun, Pengzhan and Wu, Bo and Li, Xunsong and Li, Wen and Duan, Lixin and Gan, Chuang}, title = {Counterfactual Debiasing Inference for Compositional Action Recognition}, year = {2021}, isbn = {9781450386517}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, url = {https://doi.org/10.1145/3474085.3475472}, abstract = {Compositional action recognition is a novel challenge in the computer vision community and focuses on revealing the different combinations of verbs and nouns instead of treating subject-object interactions in videos as individual instances only. Existing methods tackle this challenging task by simply ignoring appearance information or fusing object appearances with dynamic instance tracklets. However, those strategies usually do not perform well for unseen action instances. For that, in this work we propose a novel learning framework called Counterfactual Debiasing Network (CDN) to improve the model generalization ability by removing the interference introduced by visual appearances of objects/subjects. It explicitly learns the appearance information in action representations and later removes the effect of such information in a causal inference manner. Specifically, we use tracklets and video content to model the factual inference by considering both appearance information and structure information. In contrast, only video content with appearance information is leveraged in the counterfactual inference. With the two inferences, we conduct a causal graph which captures and removes the bias introduced by the appearance information by subtracting the result of the counterfactual inference from that of the factual inference. By doing that, our proposed CDN method can better recognize unseen action instances by debiasing the effect of appearances. Extensive experiments on the Something-Else dataset clearly show the effectiveness of our proposed CDN over existing state-of-the-art methods.}, booktitle = {Proceedings of the 29th ACM International Conference on Multimedia}, pages = {3220--3228}, numpages = {9} }

@misc{yang2021deconfounded, title={Deconfounded Video Moment Retrieval with Causal Intervention}, author={Xun Yang and Fuli Feng and Wei Ji and Meng Wang and Tat-Seng Chua}, year={2021}, eprint={2106.01534}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@inproceedings{10.1145/3474085.3475540, author = {Li, Yicong and Yang, Xun and Shang, Xindi and Chua, Tat-Seng}, title = {Interventional Video Relation Detection}, year = {2021}, isbn = {9781450386517}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, url = {https://doi.org/10.1145/3474085.3475540}, abstract = {Video Visual Relation Detection (VidVRD) aims to semantically describe the dynamic interactions across visual concepts localized in a video in the form of subject, predicate, object. It can help to mitigate the semantic gap between vision and language in video understanding, thus receiving increasing attention in multimedia communities. Existing efforts primarily leverage the multimodal/spatio-temporal feature fusion to augment the representation of object trajectories as well as their interactions and formulate the prediction of predicates as a multi-class classification task. Despite their effectiveness, existing models ignore the severe long-tailed bias in VidVRD datasets. As a result, the models' prediction will be easily biased towards the popular head predicates (e.g., next-to and in-front-of), thus leading to poor generalizability. To fill the research gap, this paper proposes an Interventional Video Relation Detection (IVRD) approach that aims to improve not only the accuracy but also the robustness of the model prediction. Specifically, to better model the high-level visual predicate, our IVRD consists of two key components: 1) we first learn a set of predicate prototypes, where each prototype vector describes a set of relation references with the same predicate; and 2) we apply a causality-inspired intervention on the model input subject, object, which forces the model to fairly incorporate each possible predicate prototype into consideration. We expect the model to focus more on the visual content of the dynamic interaction between subject and object, rather than the spurious correlations between the model input and predicate labels. Extensive experiments on two popular benchmark datasets show the effectiveness of IVRD and also its advantages in reducing the bad long-tailed bias.}, booktitle = {Proceedings of the 29th ACM International Conference on Multimedia}, pages = {4091--4099}, numpages = {9} }

@misc{wang2020visual, title={Visual Commonsense R-CNN}, author={Tan Wang and Jianqiang Huang and Hanwang Zhang and Qianru Sun}, year={2020}, eprint={2002.12204}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{chen2020counterfactual, title={Counterfactual Samples Synthesizing for Robust Visual Question Answering}, author={Long Chen and Xin Yan and Jun Xiao and Hanwang Zhang and Shiliang Pu and Yueting Zhuang}, year={2020}, eprint={2003.06576}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{yue2021counterfactual, title={Counterfactual Zero-Shot and Open-Set Visual Recognition}, author={Zhongqi Yue and Tan Wang and Hanwang Zhang and Qianru Sun and Xian-Sheng Hua}, year={2021}, eprint={2103.00887}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{shen2021conterfactual, title={Conterfactual Generative Zero-Shot Semantic Segmentation}, author={Feihong Shen and Jun Liu and Ping Hu}, year={2021}, eprint={2106.06360}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{yue2021transporting, title={Transporting Causal Mechanisms for Unsupervised Domain Adaptation}, author={Zhongqi Yue and Qianru Sun and Xian-Sheng Hua and Hanwang Zhang}, year={2021}, eprint={2107.11055}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{yi2020clevrer, title={CLEVRER: CoLlision Events for Video REpresentation and Reasoning}, author={Kexin Yi and Chuang Gan and Yunzhu Li and Pushmeet Kohli and Jiajun Wu and Antonio Torralba and Joshua B. Tenenbaum}, year={2020}, eprint={1910.01442}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{chang2021robust, title={Towards Robust Classification Model by Counterfactual and Invariant Data Generation}, author={Chun-Hao Chang and George Alexandru Adam and Anna Goldenberg}, year={2021}, eprint={2106.01127}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{fu2020counterfactual, title={Counterfactual Vision-and-Language Navigation via Adversarial Path Sampling}, author={Tsu-Jui Fu and Xin Eric Wang and Matthew Peterson and Scott Grafton and Miguel Eckstein and William Yang Wang}, year={2020}, eprint={1911.07308}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{pan2019questionconditioned, title={Question-Conditioned Counterfactual Image Generation for VQA}, author={Jingjing Pan and Yash Goyal and Stefan Lee}, year={2019}, eprint={1911.06352}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{sauer2021counterfactual, title={Counterfactual Generative Networks}, author={Axel Sauer and Andreas Geiger}, year={2021}, eprint={2101.06046}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@inproceedings{liang-etal-2020-learning, title = "Learning to Contrast the Counterfactual Samples for Robust Visual Question Answering", author = "Liang, Zujie and Jiang, Weitao and Hu, Haifeng and Zhu, Jiaying", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2020.emnlp-main.265", doi = "10.18653/v1/2020.emnlp-main.265", pages = "3285--3292", abstract = "In the task of Visual Question Answering (VQA), most state-of-the-art models tend to learn spurious correlations in the training set and achieve poor performance in out-of-distribution test data. Some methods of generating counterfactual samples have been proposed to alleviate this problem. However, the counterfactual samples generated by most previous methods are simply added to the training data for augmentation and are not fully utilized. Therefore, we introduce a novel self-supervised contrastive learning mechanism to learn the relationship between original samples, factual samples and counterfactual samples. With the better cross-modal joint embeddings learned from the auxiliary training objective, the reasoning capability and robustness of the VQA model are boosted significantly. We evaluate the effectiveness of our method by surpassing current state-of-the-art models on the VQA-CP dataset, a diagnostic benchmark for assessing the VQA model{'}s robustness.", }

@misc{ilse2020selecting, title={Selecting Data Augmentation for Simulating Interventions}, author={Maximilian Ilse and Jakub M. Tomczak and Patrick Forré}, year={2020}, eprint={2005.01856}, archivePrefix={arXiv}, primaryClass={stat.ML} }

@misc{rao2021counterfactual, title={Counterfactual Attention Learning for Fine-Grained Visual Categorization and Re-identification}, author={Yongming Rao and Guangyi Chen and Jiwen Lu and Jie Zhou}, year={2021}, eprint={2108.08728}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{rosenberg2021vqa, title={Are VQA Systems RAD? Measuring Robustness to Augmented Data with Focused Interventions}, author={Daniel Rosenberg and Itai Gat and Amir Feder and Roi Reichart}, year={2021}, eprint={2106.04484}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{neto2020causalityaware, title={Causality-aware counterfactual confounding adjustment for feature representations learned by deep models}, author={Elias Chaibub Neto}, year={2020}, eprint={2004.09466}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@article{Pedreschi_Giannotti_Guidotti_Monreale_Ruggieri_Turini_2019, title={Meaningful Explanations of Black Box AI Decision Systems}, volume={33}, url={https://ojs.aaai.org/index.php/AAAI/article/view/5050}, DOI={10.1609/aaai.v33i01.33019780}, abstractNote={Black box AI systems for automated decision making, often based on machine learning over (big) data, map a user's features into a class or a score without exposing the reasons why. This is problematic not only for lack of transparency, but also for possible biases inherited by the algorithms from human prejudices and collection artifacts hidden in the training data, which may lead to unfair or wrong decisions. We focus on the urgent open challenge of how to construct meaningful explanations of opaque AI/ML systems, introducing the local-to-global framework for black box explanation, articulated along three lines: (i) the language for expressing explanations in terms of logic rules, with statistical and causal interpretation; (ii) the inference of local explanations for revealing the decision rationale for a specific case, by auditing the black box in the vicinity of the target instance; (iii), the bottom-up generalization of many local explanations into simple global ones, with algorithms that optimize for quality and comprehensibility. We argue that the local-first approach opens the door to a wide variety of alternative solutions along different dimensions: a variety of data sources (relational, text, images, etc.), a variety of learning problems (multi-label classification, regression, scoring, ranking), a variety of languages for expressing meaningful explanations, a variety of means to audit a black box.}, number={01}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pedreschi, Dino and Giannotti, Fosca and Guidotti, Riccardo and Monreale, Anna and Ruggieri, Salvatore and Turini, Franco}, year={2019}, month={Jul.}, pages={9780-9784} }

@inproceedings{yang2019causal, title={When Causal Intervention Meets Adversarial Examples and Image Masking for Deep Neural Networks}, author={Yang, Chao-Han Huck and Liu, Yi-Chieh and Chen, Pin-Yu and Ma, Xiaoli and Tsai, Yi-Chang James}, booktitle={2019 IEEE International Conference on Image Processing (ICIP)}, publisher={IEEE}, year={2019}, month={Sep}, url={http://dx.doi.org/10.1109/ICIP.2019.8803554}, doi={10.1109/icip.2019.8803554} }

@misc{goyal2019counterfactual, title={Counterfactual Visual Explanations}, author={Yash Goyal and Ziyan Wu and Jan Ernst and Dhruv Batra and Devi Parikh and Stefan Lee}, year={2019}, eprint={1904.07451}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@misc{rodriguez2021trivial, title={Beyond Trivial Counterfactual Explanations with Diverse Valuable Explanations}, author={Pau Rodriguez and Massimo Caccia and Alexandre Lacoste and Lee Zamparo and Issam Laradji and Laurent Charlin and David Vazquez}, year={2021}, eprint={2103.10226}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@misc{wang2020scout, title={SCOUT: Self-aware Discriminant Counterfactual Explanations}, author={Pei Wang and Nuno Vasconcelos}, year={2020}, eprint={2004.07769}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{oh2021born, title={Born Identity Network: Multi-way Counterfactual Map Generation to Explain a Classifier's Decision}, author={Kwanseok Oh and Jee Seok Yoon and Heung-Il Suk}, year={2021}, eprint={2011.10381}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{smith2020counterfactual, title={Counterfactual Explanation and Causal Inference in Service of Robustness in Robot Control}, author={Simón C. Smith and Subramanian Ramamoorthy}, year={2020}, eprint={2009.08856}, archivePrefix={arXiv}, primaryClass={cs.RO} }

@misc{jung2021counterfactual, title={Counterfactual Explanation Based on Gradual Construction for Deep Networks}, author={Hong-Gyu Jung and Sin-Han Kang and Hee-Dong Kim and Dong-Ok Won and Seong-Whan Lee}, year={2021}, eprint={2008.01897}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@article{Akula_Wang_Zhu_2020, title={CoCoX: Generating Conceptual and Counterfactual Explanations via Fault-Lines}, volume={34}, url={https://ojs.aaai.org/index.php/AAAI/article/view/5643}, DOI={10.1609/aaai.v34i03.5643}, abstractNote={We present CoCoX (short for Conceptual and Counterfactual Explanations), a model for explaining decisions made by a deep convolutional neural network (CNN). In Cognitive Psychology, the factors (or semantic-level features) that humans zoom in on when they imagine an alternative to a model prediction are often referred to as fault-lines. Motivated by this, our CoCoX model explains decisions made by a CNN using fault-lines. Specifically, given an input image I for which a CNN classification model M predicts class c_pred, our fault-line based explanation identifies the minimal semantic-level features (e.g., stripes on zebra, pointed ears of dog), referred to as explainable concepts, that need to be added to or deleted from I in order to alter the classification category of I by M to another specified class c_alt. We argue that, due to the conceptual and counterfactual nature of fault-lines, our CoCoX explanations are practical and more natural for both expert and non-expert users to understand the internal workings of complex deep learning models. Extensive quantitative and qualitative experiments verify our hypotheses, showing that CoCoX significantly outperforms the state-of-the-art explainable AI models. Our implementation is available at https://github.com/arjunakula/CoCoX}, number={03}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Akula, Arjun and Wang, Shuai and Zhu, Song-Chun}, year={2020}, month={Apr.}, pages={2594-2601} }

@misc{akula2021cxtom, title={CX-ToM: Counterfactual Explanations with Theory-of-Mind for Enhancing Human Trust in Image Recognition Models}, author={Arjun R. Akula and Keze Wang and Changsong Liu and Sari Saba-Sadiya and Hongjing Lu and Sinisa Todorovic and Joyce Chai and Song-Chun Zhu}, year={2021}, eprint={2109.01401}, archivePrefix={arXiv}, primaryClass={cs.AI} }

@misc{holtgen2021deduce, title={DeDUCE: Generating Counterfactual Explanations Efficiently}, author={Benedikt Höltgen and Lisa Schut and Jan M. Brauner and Yarin Gal}, year={2021}, eprint={2111.15639}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{thiagarajan2021designing, title={Designing Counterfactual Generators using Deep Model Inversion}, author={Jayaraman J. Thiagarajan and Vivek Narayanaswamy and Deepta Rajan and Jason Liang and Akshay Chaudhari and Andreas Spanias}, year={2021}, eprint={2109.14274}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@misc{vermeire2020explainable, title={Explainable Image Classification with Evidence Counterfactual}, author={Tom Vermeire and David Martens}, year={2020}, eprint={2004.07511}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@misc{parafita2019explaining, title={Explaining Visual Models by Causal Attribution}, author={Álvaro Parafita and Jordi Vitrià}, year={2019}, eprint={1909.08891}, archivePrefix={arXiv}, primaryClass={stat.ML} }

@misc{singla2021explaining, title={Explaining the Black-box Smoothly- A Counterfactual Approach}, author={Sumedha Singla and Brian Pollack and Stephen Wallace and Kayhan Batmanghelich}, year={2021}, eprint={2101.04230}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{sani2021explaining, title={Explaining the Behavior of Black-Box Prediction Algorithms with Causal Learning}, author={Numair Sani and Daniel Malinsky and Ilya Shpitser}, year={2021}, eprint={2006.02482}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@misc{goyal2020explaining, title={Explaining Classifiers with Causal Concept Effect (CaCE)}, author={Yash Goyal and Amir Feder and Uri Shalit and Been Kim}, year={2020}, eprint={1907.07165}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@misc{zhao2020fast, title={Fast Real-time Counterfactual Explanations}, author={Yunxia Zhao}, year={2020}, eprint={2007.05684}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{liu2019generative, title={Generative Counterfactual Introspection for Explainable Deep Learning}, author={Shusen Liu and Bhavya Kailkhura and Donald Loveland and Yong Han}, year={2019}, eprint={1907.03077}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@misc{yang2021generative, title={Generative Counterfactuals for Neural Networks via Attribute-Informed Perturbation}, author={Fan Yang and Ninghao Liu and Mengnan Du and Xia Hu}, year={2021}, eprint={2101.06930}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@misc{plumb2021finding, title={Finding and Fixing Spurious Patterns with Explanations}, author={Gregory Plumb and Marco Tulio Ribeiro and Ameet Talwalkar}, year={2021}, eprint={2106.02112}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@misc{white2021contrastive, title={Contrastive Counterfactual Visual Explanations With Overdetermination}, author={Adam White and Kwun Ho Ngan and James Phelan and Saman Sadeghi Afgeh and Kevin Ryan and Constantino Carlos Reyes-Aldasoro and Artur d'Avila Garcez}, year={2021}, eprint={2106.14556}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@article{thiagarajan2022training, author = {Thiagarajan, Jayaraman J. and Thopalli, Kowshik and Rajan, Deepta and Turaga, Pavan}, year = {2022}, month = {01}, title = {Training calibration-based counterfactual explainers for deep learning models in medical image analysis}, volume = {12}, journal = {Scientific Reports}, doi = {10.1038/s41598-021-04529-5} }

@inproceedings{ijcai2020-742, title = {Generating Natural Counterfactual Visual Explanations}, author = {Zhao, Wenqi and Oyama, Satoshi and Kurihara, Masahito}, booktitle = {Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence, {IJCAI-20}}, publisher = {International Joint Conferences on Artificial Intelligence Organization}, editor = {Christian Bessiere}, pages = {5204--5205}, year = {2020}, month = {7}, note = {Doctoral Consortium}, doi = {10.24963/ijcai.2020/742}, url = {https://doi.org/10.24963/ijcai.2020/742} }

@misc{sixt2021interpretability, title={Interpretability Through Invertibility: A Deep Convolutional Network With Ideal Counterfactuals And Isosurfaces}, author={Leon Sixt and Martin Schuessler and Philipp Wei{\ss} and Tim Landgraf}, year={2021}, url={https://openreview.net/forum?id=8YFhXYe1Ps} }

@misc{eckstein2021discriminative, title={Discriminative Attribution from Counterfactuals}, author={Nils Eckstein and Alexander S. Bates and Gregory S. X. E. Jefferis and Jan Funke}, year={2021}, eprint={2109.13412}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@misc{wang2021proactive, title={Proactive Pseudo-Intervention: Causally Informed Contrastive Learning For Interpretable Vision Models}, author={Dong Wang and Yuewei Yang and Chenyang Tao and Zhe Gan and Liqun Chen and Fanjie Kong and Ricardo Henao and Lawrence Carin}, year={2021}, eprint={2012.03369}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{gat2021latent, title={Latent Space Explanation by Intervention}, author={Itai Gat and Guy Lorberbom and Idan Schwartz and Tamir Hazan}, year={2021}, eprint={2112.04895}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@misc{beery2018recognition, title={Recognition in Terra Incognita}, author={Sara Beery and Grant van Horn and Pietro Perona}, year={2018}, eprint={1807.04975}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@inproceedings{yang2021model, title={Model-Based Counterfactual Synthesizer for Interpretation}, url={http://dx.doi.org/10.1145/3447548.3467333}, DOI={10.1145/3447548.3467333}, booktitle={Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery \& Data Mining}, publisher={ACM}, author={Yang, Fan and Alva, Sahan Suresh and Chen, Jiahao and Hu, Xia}, year={2021}, month={Aug} }

@article{2021_friesleben, title={The Intriguing Relation Between Counterfactual Explanations and Adversarial Examples}, ISSN={1572-8641}, url={http://dx.doi.org/10.1007/s11023-021-09580-9}, DOI={10.1007/s11023-021-09580-9}, journal={Minds and Machines}, publisher={Springer Science and Business Media LLC}, author={Freiesleben, Timo}, year={2021}, month={Oct} }

@misc{dong2021pretrained, title={How Should Pre-Trained Language Models Be Fine-Tuned Towards Adversarial Robustness?}, author={Xinhsuai Dong and Luu Anh Tuan and Min Lin and Shuicheng Yan and Hanwang Zhang}, year={2021}, eprint={2112.11668}, archivePrefix={arXiv}, primaryClass={cs.CL} }

@misc{niu2021introspective, title={Introspective Distillation for Robust Question Answering}, author={Yulei Niu and Hanwang Zhang}, year={2021}, eprint={2111.01026}, archivePrefix={arXiv}, primaryClass={cs.CV} }

@misc{lopezpaz2017discovering, title={Discovering Causal Signals in Images}, author={David Lopez-Paz and Robert Nishihara and Soumith Chintala and Bernhard Schölkopf and Léon Bottou}, year={2017}, eprint={1605.08179}, archivePrefix={arXiv}, primaryClass={stat.ML} }

@article{castro2020causality, title={Causality matters in medical imaging}, volume={11}, ISSN={2041-1723}, url={http://dx.doi.org/10.1038/s41467-020-17478-w}, DOI={10.1038/s41467-020-17478-w}, number={1}, journal={Nature Communications}, publisher={Springer Science and Business Media LLC}, author={Castro, Daniel C. and Walker, Ian and Glocker, Ben}, year={2020}, month={Jul} }

@misc{zhang2018mixup, title={mixup: Beyond Empirical Risk Minimization}, author={Hongyi Zhang and Moustapha Cisse and Yann N. Dauphin and David Lopez-Paz}, year={2018}, eprint={1710.09412}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@article{10.1145/3241036, author = {Pearl, Judea}, title = {The Seven Tools of Causal Inference, with Reflections on Machine Learning}, year = {2019}, issue_date = {March 2019}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, volume = {62}, number = {3}, issn = {0001-0782}, url = {https://doi.org/10.1145/3241036}, doi = {10.1145/3241036}, abstract = {The kind of causal inference seen in natural human thought can be "algorithmitized" to help produce human-level machine intelligence.}, journal = {Commun. ACM}, month = {feb}, pages = {54–60}, numpages = {7} }

@article{10.2307/2337329, ISSN = {00063444}, URL = {http://www.jstor.org/stable/2337329}, abstract = {The primary aim of this paper is to show how graphical models can be used as a mathematical language for integrating statistical and subject-matter information. In particular, the paper develops a principled, nonparametric framework for causal inference, in which diagrams are queried to determine if the assumptions available are sufficient for identifying causal effects from nonexperimental data. If so the diagrams can be queried to produce mathematical expressions for causal effects in terms of observed distributions; otherwise, the diagrams can be queried to suggest additional observations or auxiliary experiments from which the desired inferences can be obtained.}, author = {Judea Pearl}, journal = {Biometrika}, number = {4}, pages = {669--688}, publisher = {[Oxford University Press, Biometrika Trust]}, title = {Causal Diagrams for Empirical Research}, volume = {82}, year = {1995} }

@book{pearl2000models, title={Causality: Models, Reasoning and Inference}, author={Pearl, Judea}, year={2000}, publisher={Cambridge University Press}, address={Cambridge, UK} }

@book{pearl2018book, title={The book of why: the new science of cause and effect}, author={Pearl, Judea and Mackenzie, Dana}, year={2018}, publisher={Basic Books} }

@article{pearl2019seven, title={The seven tools of causal inference, with reflections on machine learning}, author={Pearl, Judea}, journal={Communications of the ACM}, volume={62}, number={3}, pages={54--60}, year={2019}, publisher={ACM New York, NY, USA} }

@article{bareinboim2020pearl, title={On Pearl’s hierarchy and the foundations of causal inference}, author={Bareinboim, Elias and Correa, Juan D and Ibeling, Duligur and Icard, Thomas}, journal={ACM Special Volume in Honor of Judea Pearl (provisional title)}, volume={2}, number={3}, pages={4}, year={2020} }

@phdthesis{li2021unit, title={Unit selection based on counterfactual logic}, author={Li, Ang}, year={2021}, school={UCLA} }

@inproceedings{ang2019unit, title={Unit Selection Based on Counterfactual Logic}, author={Li, Ang and Pearl, Judea}, booktitle={Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, {IJCAI-19}}, year={2019} }

@article{sundar1998does, title={Does web advertising work? Memory for print vs. online media}, author={Sundar, S Shyam and Narayan, Sunetra and Obregon, Rafael and Uppal, Charu}, journal={Journalism \& Mass Communication Quarterly}, volume={75}, number={4}, pages={822--835}, year={1998}, publisher={SAGE Publications Sage CA: Los Angeles, CA} }

@article{blumenthal2001normative, title={Do normative appeals affect tax compliance? Evidence from a controlled experiment in Minnesota}, author={Blumenthal, Marsha and Christian, Charles and Slemrod, Joel}, journal={National Tax Journal}, volume={54}, number={1}, pages={125--138}, year={2001}, publisher={The University of Chicago Press} }

@article{winer2001framework, title={A framework for customer relationship management}, author={Winer, Russell S}, journal={California management review}, volume={43}, number={4}, pages={89--105}, year={2001}, publisher={SAGE Publications Sage CA: Los Angeles, CA} }

@article{resnick2006value, title={The value of reputation on eBay: A controlled experiment}, author={Resnick, Paul and Zeckhauser, Richard and Swanson, John and Lockwood, Kate}, journal={Experimental economics}, volume={9}, number={2}, pages={79--101}, year={2006}, publisher={Springer} }

@article{lewis2014online, title={Online ads and offline sales: measuring the effect of retail advertising via a controlled experiment on Yahoo!}, author={Lewis, Randall A and Reiley, David H}, journal={Quantitative Marketing and Economics}, volume={12}, number={3}, pages={235--266}, year={2014}, publisher={Springer} }

@misc{xia2021causalneural, title={The Causal-Neural Connection: Expressiveness, Learnability, and Inference}, author={Kevin Xia and Kai-Zhan Lee and Yoshua Bengio and Elias Bareinboim}, year={2021}, eprint={2107.00793}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@article{jung2021double, title={Double Machine Learning Density Estimation for Local Treatment Effects with Instruments}, author={Jung, Yonghan and Tian, Jin and Bareinboim, Elias}, year={2021} }

@misc{willig2021causal, title={The Causal Loss: Driving Correlation to Imply Causation}, author={Moritz Willig and Matej Zečević and Devendra Singh Dhami and Kristian Kersting}, year={2021}, eprint={2110.12066}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@article{goodfellow2014generative, title={Generative adversarial nets}, author={Goodfellow, Ian and Pouget-Abadie, Jean and Mirza, Mehdi and Xu, Bing and Warde-Farley, David and Ozair, Sherjil and Courville, Aaron and Bengio, Yoshua}, journal={Advances in neural information processing systems}, volume={27}, year={2014} }

@misc{45507, title = {Inceptionism: Going Deeper into Neural Networks}, author = {Alexander Mordvintsev and Christopher Olah and Mike Tyka}, year = {2015}, URL = {https://research.googleblog.com/2015/06/inceptionism-going-deeper-into-neural.html} }

@misc{yin2020dreaming, title={Dreaming to Distill: Data-free Knowledge Transfer via DeepInversion}, author={Hongxu Yin and Pavlo Molchanov and Zhizhong Li and Jose M. Alvarez and Arun Mallya and Derek Hoiem and Niraj K. Jha and Jan Kautz}, year={2020}, eprint={1912.08795}, archivePrefix={arXiv}, primaryClass={cs.LG} }

@book{spirtes1993causation, author = {Spirtes, Peter and Glymour, Clark and Scheines, Richard}, title = {Causation, Prediction, and Search}, series = {Lecture Notes in Statistics}, volume = {81}, year = {1993}, publisher = {Springer}, isbn = {978-1-4612-7650-0}, doi = {10.1007/978-1-4612-2748-9} }