{
  "topics": [
    {
      "key": "understanding",
      "label": "Understanding Cultural Differences"
    },
    {
      "key": "adapting",
      "label": "Adapting to Different Cultures"
    },
    {
      "key": "multimodal",
      "label": "Multimodal Evaluation"
    },
    {
      "key": "social-bias",
      "label": "Social Bias"
    },
    {
      "key": "fake-news",
      "label": "Fake News Detection"
    }
  ],
  "publications": [
    {
      "id": "knowbias",
      "year": "2026",
      "topics": [
        "social-bias"
      ],
      "venue": "arXiv",
      "title": "KnowBias: Mitigating Social Bias in LLMs via Know-Bias Neuron Enhancement",
      "authors": [
        "Jinhao Pan",
        "Chahat Raj",
        "Anjishnu Mukherjee",
        "Sina Mansouri",
        "Bowen Wei",
        "Shloka Yada",
        "Ziwei Zhu"
      ],
      "highlightedAuthors": [
        "Anjishnu Mukherjee"
      ],
      "image": {
        "source": "images/fig1/knowbias.png",
        "thumb": "images/optimized/fig1/knowbias-360.e42a69f9cf.webp",
        "full": "images/optimized/full/knowbias-full.ed19b118a6.webp",
        "alt": "KnowBias figure",
        "width": 360,
        "height": 335,
        "fullWidth": 960,
        "fullHeight": 893,
        "variants": [
          {
            "width": 360,
            "height": 335,
            "src": "images/optimized/fig1/knowbias-360.e42a69f9cf.webp"
          },
          {
            "width": 720,
            "height": 670,
            "src": "images/optimized/fig1/knowbias-720.639d4a29cf.webp"
          }
        ],
        "srcset": "images/optimized/fig1/knowbias-360.e42a69f9cf.webp 360w, images/optimized/fig1/knowbias-720.639d4a29cf.webp 720w",
        "sizes": "(max-width: 600px) calc(100vw - 2rem), 92px",
        "avifVariants": [
          {
            "width": 360,
            "height": 335,
            "src": "images/optimized/fig1/knowbias-360.5739cbcba6.avif"
          },
          {
            "width": 720,
            "height": 670,
            "src": "images/optimized/fig1/knowbias-720.1f31b9c33c.avif"
          }
        ],
        "avifSrcset": "images/optimized/fig1/knowbias-360.5739cbcba6.avif 360w, images/optimized/fig1/knowbias-720.1f31b9c33c.avif 720w"
      },
      "links": [
        {
          "label": "Paper",
          "href": "https://arxiv.org/pdf/2601.21864",
          "icon": "file"
        },
        {
          "label": "Code",
          "href": "https://github.com/JP-25/KnowBias",
          "icon": "code"
        }
      ],
      "abstract": "Highlights and amplifies neurons tied to bias-related knowledge so a model can preserve capability while reducing stereotyped generations across social groups."
    },
    {
      "id": "metadata-localization",
      "year": "2026",
      "topics": [
        "adapting"
      ],
      "venue": "arXiv",
      "title": "Metadata Conditioned Large Language Models for Localization",
      "authors": [
        "Anjishnu Mukherjee",
        "Ziwei Zhu",
        "Antonios Anastasopoulos"
      ],
      "highlightedAuthors": [
        "Anjishnu Mukherjee"
      ],
      "image": {
        "source": "images/fig1/metadata.png",
        "thumb": "images/optimized/fig1/metadata-localization-360.c373887505.webp",
        "full": "images/optimized/full/metadata-localization-full.7d52c6b370.webp",
        "alt": "Metadata Conditioned LLMs figure",
        "width": 360,
        "height": 295,
        "fullWidth": 960,
        "fullHeight": 785,
        "variants": [
          {
            "width": 360,
            "height": 295,
            "src": "images/optimized/fig1/metadata-localization-360.c373887505.webp"
          },
          {
            "width": 720,
            "height": 589,
            "src": "images/optimized/fig1/metadata-localization-720.d001f76fa6.webp"
          }
        ],
        "srcset": "images/optimized/fig1/metadata-localization-360.c373887505.webp 360w, images/optimized/fig1/metadata-localization-720.d001f76fa6.webp 720w",
        "sizes": "(max-width: 600px) calc(100vw - 2rem), 92px",
        "avifVariants": [
          {
            "width": 360,
            "height": 295,
            "src": "images/optimized/fig1/metadata-localization-360.40e9012de5.avif"
          },
          {
            "width": 720,
            "height": 589,
            "src": "images/optimized/fig1/metadata-localization-720.23905b1da3.avif"
          }
        ],
        "avifSrcset": "images/optimized/fig1/metadata-localization-360.40e9012de5.avif 360w, images/optimized/fig1/metadata-localization-720.23905b1da3.avif 720w"
      },
      "links": [
        {
          "label": "Paper",
          "href": "https://arxiv.org/pdf/2601.15236",
          "icon": "file"
        },
        {
          "label": "Code",
          "href": "https://github.com/iamshnoo/metadata_localization",
          "icon": "code"
        }
      ],
      "abstract": "Conditions LLM behavior on structured cultural and regional metadata to improve localization quality without requiring a separate model for every setting."
    },
    {
      "id": "south-asian-biases",
      "year": "2025",
      "topics": [
        "social-bias"
      ],
      "venue": "TrustNLP @ ACL '25",
      "title": "Measuring South Asian Biases in Large Language Models",
      "authors": [
        "Mamnuya Rinki",
        "Chahat Raj",
        "Anjishnu Mukherjee",
        "Ziwei Zhu"
      ],
      "highlightedAuthors": [
        "Anjishnu Mukherjee"
      ],
      "image": {
        "source": "images/fig1/south_asian_biases.png",
        "thumb": "images/optimized/fig1/south-asian-biases-360.93bb5c3784.webp",
        "full": "images/optimized/full/south-asian-biases-full.ac508bf79a.webp",
        "alt": "Measuring South Asian Biases figure",
        "width": 360,
        "height": 124,
        "fullWidth": 960,
        "fullHeight": 331,
        "variants": [
          {
            "width": 360,
            "height": 124,
            "src": "images/optimized/fig1/south-asian-biases-360.93bb5c3784.webp"
          },
          {
            "width": 720,
            "height": 248,
            "src": "images/optimized/fig1/south-asian-biases-720.a237eec93c.webp"
          }
        ],
        "srcset": "images/optimized/fig1/south-asian-biases-360.93bb5c3784.webp 360w, images/optimized/fig1/south-asian-biases-720.a237eec93c.webp 720w",
        "sizes": "(max-width: 600px) calc(100vw - 2rem), 92px",
        "avifVariants": [
          {
            "width": 360,
            "height": 124,
            "src": "images/optimized/fig1/south-asian-biases-360.e782891a88.avif"
          },
          {
            "width": 720,
            "height": 248,
            "src": "images/optimized/fig1/south-asian-biases-720.9ee3109246.avif"
          }
        ],
        "avifSrcset": "images/optimized/fig1/south-asian-biases-360.e782891a88.avif 360w, images/optimized/fig1/south-asian-biases-720.9ee3109246.avif 720w"
      },
      "links": [
        {
          "label": "Paper",
          "href": "https://arxiv.org/pdf/2505.18466",
          "icon": "file"
        },
        {
          "label": "Code",
          "href": "https://github.com/mamnuya/purdah_and_patriarchy",
          "icon": "code"
        }
      ],
      "abstract": "Benchmarks how large language models portray South Asian identities, surfacing recurring stereotypes, representational gaps, and uneven treatment across prompts and groups."
    },
    {
      "id": "crossroads",
      "year": "2025",
      "topics": [
        "multimodal"
      ],
      "venue": "WACV '25",
      "title": "Crossroads of Continents: Automated Artifact Extraction for Cultural Adaptation with Large Multimodal Models",
      "authors": [
        "Anjishnu Mukherjee",
        "Ziwei Zhu",
        "Antonios Anastasopoulos"
      ],
      "highlightedAuthors": [
        "Anjishnu Mukherjee"
      ],
      "image": {
        "source": "images/fig1/crossroads.png",
        "thumb": "images/optimized/fig1/crossroads-360.ec33a3eb82.webp",
        "full": "images/optimized/full/crossroads-full.d4bae18736.webp",
        "alt": "Crossroads of Continents figure",
        "width": 347,
        "height": 360,
        "fullWidth": 926,
        "fullHeight": 960,
        "variants": [
          {
            "width": 347,
            "height": 360,
            "src": "images/optimized/fig1/crossroads-360.ec33a3eb82.webp"
          },
          {
            "width": 694,
            "height": 720,
            "src": "images/optimized/fig1/crossroads-720.c749c9d3bc.webp"
          }
        ],
        "srcset": "images/optimized/fig1/crossroads-360.ec33a3eb82.webp 347w, images/optimized/fig1/crossroads-720.c749c9d3bc.webp 694w",
        "sizes": "(max-width: 600px) calc(100vw - 2rem), 92px",
        "avifVariants": [
          {
            "width": 347,
            "height": 360,
            "src": "images/optimized/fig1/crossroads-360.28f0eea330.avif"
          },
          {
            "width": 694,
            "height": 720,
            "src": "images/optimized/fig1/crossroads-720.beff163537.avif"
          }
        ],
        "avifSrcset": "images/optimized/fig1/crossroads-360.28f0eea330.avif 347w, images/optimized/fig1/crossroads-720.beff163537.avif 694w"
      },
      "links": [
        {
          "label": "Paper",
          "href": "https://arxiv.org/pdf/2407.02067",
          "icon": "file"
        },
        {
          "label": "Code",
          "href": "https://github.com/iamshnoo/crossroads",
          "icon": "code"
        }
      ],
      "abstract": "Uses multimodal models to extract culturally salient artifacts from images so visual content can be adapted more appropriately across regional contexts."
    },
    {
      "id": "biasdora",
      "year": "2024",
      "topics": [
        "social-bias"
      ],
      "venue": "EMNLP Findings '24",
      "title": "BiasDora: Exploring Hidden Biased Associations in Vision-Language Models",
      "authors": [
        "Chahat Raj",
        "Anjishnu Mukherjee",
        "Aylin Caliskan",
        "Antonios Anastasopoulos",
        "Ziwei Zhu"
      ],
      "highlightedAuthors": [
        "Anjishnu Mukherjee"
      ],
      "image": {
        "source": "images/fig1/biasdora.png",
        "thumb": "images/optimized/fig1/biasdora-360.cd21638bd0.webp",
        "full": "images/optimized/full/biasdora-full.01ecb43218.webp",
        "alt": "BiasDora figure",
        "width": 254,
        "height": 360,
        "fullWidth": 595,
        "fullHeight": 842,
        "variants": [
          {
            "width": 254,
            "height": 360,
            "src": "images/optimized/fig1/biasdora-360.cd21638bd0.webp"
          },
          {
            "width": 509,
            "height": 720,
            "src": "images/optimized/fig1/biasdora-720.505398cf7d.webp"
          }
        ],
        "srcset": "images/optimized/fig1/biasdora-360.cd21638bd0.webp 254w, images/optimized/fig1/biasdora-720.505398cf7d.webp 509w",
        "sizes": "(max-width: 600px) calc(100vw - 2rem), 92px",
        "avifVariants": [
          {
            "width": 254,
            "height": 360,
            "src": "images/optimized/fig1/biasdora-360.900a25e576.avif"
          },
          {
            "width": 509,
            "height": 720,
            "src": "images/optimized/fig1/biasdora-720.aaff02dc46.avif"
          }
        ],
        "avifSrcset": "images/optimized/fig1/biasdora-360.900a25e576.avif 254w, images/optimized/fig1/biasdora-720.aaff02dc46.avif 509w"
      },
      "links": [
        {
          "label": "Paper",
          "href": "https://arxiv.org/pdf/2407.02066",
          "icon": "file"
        },
        {
          "label": "Code",
          "href": "https://github.com/chahatraj/BiasDora",
          "icon": "code"
        }
      ],
      "abstract": "Probes vision-language models for hidden biased associations, showing how multimodal representations can encode and reproduce social stereotypes."
    },
    {
      "id": "breaking-bias",
      "year": "2024",
      "topics": [
        "social-bias"
      ],
      "venue": "AIES '24",
      "title": "Breaking Bias, Building Bridges: Evaluation and Mitigation of Social Biases in LLMs via Contact Hypothesis",
      "authors": [
        "Chahat Raj*",
        "Anjishnu Mukherjee*",
        "Aylin Caliskan",
        "Antonios Anastasopoulos",
        "Ziwei Zhu"
      ],
      "highlightedAuthors": [
        "Anjishnu Mukherjee*"
      ],
      "image": {
        "source": "images/fig1/breakingbias.png",
        "thumb": "images/optimized/fig1/breaking-bias-360.f2a8b2000f.webp",
        "full": "images/optimized/full/breaking-bias-full.0c2153a675.webp",
        "alt": "Breaking Bias figure",
        "width": 254,
        "height": 360,
        "fullWidth": 595,
        "fullHeight": 842,
        "variants": [
          {
            "width": 254,
            "height": 360,
            "src": "images/optimized/fig1/breaking-bias-360.f2a8b2000f.webp"
          },
          {
            "width": 509,
            "height": 720,
            "src": "images/optimized/fig1/breaking-bias-720.5471a18200.webp"
          }
        ],
        "srcset": "images/optimized/fig1/breaking-bias-360.f2a8b2000f.webp 254w, images/optimized/fig1/breaking-bias-720.5471a18200.webp 509w",
        "sizes": "(max-width: 600px) calc(100vw - 2rem), 92px",
        "avifVariants": [
          {
            "width": 254,
            "height": 360,
            "src": "images/optimized/fig1/breaking-bias-360.c1ddab44a3.avif"
          },
          {
            "width": 509,
            "height": 720,
            "src": "images/optimized/fig1/breaking-bias-720.5673ad3cd0.avif"
          }
        ],
        "avifSrcset": "images/optimized/fig1/breaking-bias-360.c1ddab44a3.avif 254w, images/optimized/fig1/breaking-bias-720.5673ad3cd0.avif 509w"
      },
      "links": [
        {
          "label": "Paper",
          "href": "https://arxiv.org/pdf/2407.02030",
          "icon": "file"
        },
        {
          "label": "Code",
          "href": "https://github.com/chahatraj/breakingbias",
          "icon": "code"
        }
      ],
      "abstract": "Evaluates whether contact-hypothesis-inspired interventions can reduce social bias in LLM outputs while maintaining the model's overall usefulness."
    },
    {
      "id": "global-gallery",
      "year": "2024",
      "topics": [
        "adapting"
      ],
      "venue": "NAACL '24",
      "title": "Global Gallery: The Fine Art of Painting Culture Portraits through Multilingual Instruction Tuning",
      "authors": [
        "Anjishnu Mukherjee",
        "Aylin Caliskan",
        "Ziwei Zhu",
        "Antonios Anastasopoulos"
      ],
      "highlightedAuthors": [
        "Anjishnu Mukherjee"
      ],
      "image": {
        "source": "images/fig1/global_gallery.png",
        "thumb": "images/optimized/fig1/global-gallery-360.8b0af0a7ea.webp",
        "full": "images/optimized/full/global-gallery-full.58e0cf6822.webp",
        "alt": "Global Gallery figure",
        "width": 254,
        "height": 360,
        "fullWidth": 595,
        "fullHeight": 842,
        "variants": [
          {
            "width": 254,
            "height": 360,
            "src": "images/optimized/fig1/global-gallery-360.8b0af0a7ea.webp"
          },
          {
            "width": 509,
            "height": 720,
            "src": "images/optimized/fig1/global-gallery-720.a117141e02.webp"
          }
        ],
        "srcset": "images/optimized/fig1/global-gallery-360.8b0af0a7ea.webp 254w, images/optimized/fig1/global-gallery-720.a117141e02.webp 509w",
        "sizes": "(max-width: 600px) calc(100vw - 2rem), 92px",
        "avifVariants": [
          {
            "width": 254,
            "height": 360,
            "src": "images/optimized/fig1/global-gallery-360.d16f299cda.avif"
          },
          {
            "width": 509,
            "height": 720,
            "src": "images/optimized/fig1/global-gallery-720.d292678692.avif"
          }
        ],
        "avifSrcset": "images/optimized/fig1/global-gallery-360.d16f299cda.avif 254w, images/optimized/fig1/global-gallery-720.d292678692.avif 509w"
      },
      "links": [
        {
          "label": "Paper",
          "href": "https://aclanthology.org/2024.naacl-long.355.pdf",
          "icon": "file"
        },
        {
          "label": "Code",
          "href": "https://github.com/iamshnoo/culture-llm",
          "icon": "code"
        }
      ],
      "abstract": "Builds multilingual instruction-tuning methods that help language models generate culturally grounded depictions instead of flattening diverse visual traditions."
    },
    {
      "id": "salsa",
      "year": "2024",
      "topics": [
        "fake-news"
      ],
      "venue": "ECIR '24",
      "title": "SALSA: Salience-Based Switching Attack for Adversarial Perturbations in Fake News Detection Models",
      "authors": [
        "Chahat Raj*",
        "Anjishnu Mukherjee*",
        "Hemant Purohit",
        "Antonios Anastasopoulos",
        "Ziwei Zhu"
      ],
      "highlightedAuthors": [
        "Anjishnu Mukherjee*"
      ],
      "image": {
        "source": "images/fig1/salsa.png",
        "thumb": "images/optimized/fig1/salsa-360.e1a6ca5ad5.webp",
        "full": "images/optimized/full/salsa-full.3941d99bd7.webp",
        "alt": "SALSA figure",
        "width": 254,
        "height": 360,
        "fullWidth": 595,
        "fullHeight": 842,
        "variants": [
          {
            "width": 254,
            "height": 360,
            "src": "images/optimized/fig1/salsa-360.e1a6ca5ad5.webp"
          },
          {
            "width": 509,
            "height": 720,
            "src": "images/optimized/fig1/salsa-720.7d38d2db06.webp"
          }
        ],
        "srcset": "images/optimized/fig1/salsa-360.e1a6ca5ad5.webp 254w, images/optimized/fig1/salsa-720.7d38d2db06.webp 509w",
        "sizes": "(max-width: 600px) calc(100vw - 2rem), 92px",
        "avifVariants": [
          {
            "width": 254,
            "height": 360,
            "src": "images/optimized/fig1/salsa-360.12e79ae783.avif"
          },
          {
            "width": 509,
            "height": 720,
            "src": "images/optimized/fig1/salsa-720.a9a3fafafe.avif"
          }
        ],
        "avifSrcset": "images/optimized/fig1/salsa-360.12e79ae783.avif 254w, images/optimized/fig1/salsa-720.a9a3fafafe.avif 509w"
      },
      "links": [
        {
          "label": "Paper",
          "href": "https://chahatraj.github.io/files/d3.pdf",
          "icon": "file"
        },
        {
          "label": "Code",
          "href": "https://github.com/iamshnoo/salsa",
          "icon": "code"
        }
      ],
      "abstract": "Introduces a salience-based adversarial attack that exposes brittleness in fake-news detectors by perturbing the most influential tokens."
    },
    {
      "id": "global-voices",
      "year": "2023",
      "topics": [
        "understanding"
      ],
      "venue": "EMNLP '23",
      "title": "Global Voices, Local Biases: Socio-cultural Prejudices across Languages",
      "authors": [
        "Anjishnu Mukherjee*",
        "Chahat Raj*",
        "Ziwei Zhu",
        "Antonios Anastasopoulos"
      ],
      "highlightedAuthors": [
        "Anjishnu Mukherjee*"
      ],
      "image": {
        "source": "images/fig1/global_voices.png",
        "thumb": "images/optimized/fig1/global-voices-360.27b23a384e.webp",
        "full": "images/optimized/full/global-voices-full.48d09868c2.webp",
        "alt": "Global Voices figure",
        "width": 254,
        "height": 360,
        "fullWidth": 595,
        "fullHeight": 842,
        "variants": [
          {
            "width": 254,
            "height": 360,
            "src": "images/optimized/fig1/global-voices-360.27b23a384e.webp"
          },
          {
            "width": 509,
            "height": 720,
            "src": "images/optimized/fig1/global-voices-720.d7cbe6cbb6.webp"
          }
        ],
        "srcset": "images/optimized/fig1/global-voices-360.27b23a384e.webp 254w, images/optimized/fig1/global-voices-720.d7cbe6cbb6.webp 509w",
        "sizes": "(max-width: 600px) calc(100vw - 2rem), 92px",
        "avifVariants": [
          {
            "width": 254,
            "height": 360,
            "src": "images/optimized/fig1/global-voices-360.5e502a350b.avif"
          },
          {
            "width": 509,
            "height": 720,
            "src": "images/optimized/fig1/global-voices-720.e34c07ac5b.avif"
          }
        ],
        "avifSrcset": "images/optimized/fig1/global-voices-360.5e502a350b.avif 254w, images/optimized/fig1/global-voices-720.e34c07ac5b.avif 509w"
      },
      "links": [
        {
          "label": "Paper",
          "href": "https://aclanthology.org/2023.emnlp-main.981.pdf",
          "icon": "file"
        },
        {
          "label": "Code",
          "href": "https://github.com/iamshnoo/weathub",
          "icon": "code"
        }
      ],
      "abstract": "Studies how socio-cultural prejudices vary across languages, showing that multilingual systems do not simply inherit one uniform pattern of bias."
    },
    {
      "id": "true-and-fair",
      "year": "2023",
      "topics": [
        "fake-news"
      ],
      "venue": "AIES '23",
      "title": "True and Fair: Robust and Unbiased Fake News Detection via Interpretable Machine Learning",
      "authors": [
        "Chahat Raj",
        "Anjishnu Mukherjee",
        "Ziwei Zhu"
      ],
      "highlightedAuthors": [
        "Anjishnu Mukherjee"
      ],
      "image": {
        "source": "images/fig1/true_and_fair.png",
        "thumb": "images/optimized/fig1/true-and-fair-360.5240b9f82d.webp",
        "full": "images/optimized/full/true-and-fair-full.b71737fd6c.webp",
        "alt": "True and Fair figure",
        "width": 240,
        "height": 360,
        "fullWidth": 640,
        "fullHeight": 960,
        "variants": [
          {
            "width": 240,
            "height": 360,
            "src": "images/optimized/fig1/true-and-fair-360.5240b9f82d.webp"
          },
          {
            "width": 480,
            "height": 720,
            "src": "images/optimized/fig1/true-and-fair-720.e63b23171a.webp"
          }
        ],
        "srcset": "images/optimized/fig1/true-and-fair-360.5240b9f82d.webp 240w, images/optimized/fig1/true-and-fair-720.e63b23171a.webp 480w",
        "sizes": "(max-width: 600px) calc(100vw - 2rem), 92px",
        "avifVariants": [
          {
            "width": 240,
            "height": 360,
            "src": "images/optimized/fig1/true-and-fair-360.fbccde7ee6.avif"
          },
          {
            "width": 480,
            "height": 720,
            "src": "images/optimized/fig1/true-and-fair-720.8b99ebdaa0.avif"
          }
        ],
        "avifSrcset": "images/optimized/fig1/true-and-fair-360.fbccde7ee6.avif 240w, images/optimized/fig1/true-and-fair-720.8b99ebdaa0.avif 480w"
      },
      "links": [
        {
          "label": "Paper",
          "href": "https://dl.acm.org/doi/10.1145/3600211.3604760",
          "icon": "file"
        },
        {
          "label": "Code",
          "href": "https://github.com/chahatraj/true-and-fair",
          "icon": "code"
        }
      ],
      "abstract": "Combines robust modeling with interpretable signals to improve fake-news detection while reducing unwanted bias in the system's decisions."
    }
  ]
}
