{
  "format_version": 3,
  "claim_formal": {
    "subject": "AI tool usage by humans",
    "property": "associated with diminished critical thinking and problem-solving abilities",
    "operator": ">=",
    "operator_note": "The claim as stated is a universal causal assertion. We interpret it as: at least 3 independent, peer-reviewed or authoritative sources report that AI tool usage is associated with reduced critical thinking or problem-solving abilities. This is a consensus-of-evidence interpretation \u2014 the claim is PROVED if the weight of independent evidence supports the association, even though individual studies show correlation rather than proven causation. Important nuance: the evidence shows this effect is moderated by usage patterns, task stakes, and user confidence \u2014 heavy/uncritical use drives the decline, not all AI usage universally.",
    "threshold": 3,
    "proof_direction": "affirm"
  },
  "claim_natural": "Using AI tools makes humans worse at critical thinking and original problem-solving.",
  "evidence": {
    "B1": {
      "type": "empirical",
      "label": "Gerlich (2025): Negative correlation (r=-0.68) between AI usage and critical thinking scores in 666 participants",
      "sub_claim": null,
      "source": {
        "name": "PsyPost report on Gerlich (2025), Societies 15(1):6",
        "url": "https://www.psypost.org/ai-tools-may-weaken-critical-thinking-skills-by-encouraging-cognitive-offloading-study-suggests/",
        "quote": "Participants who reported heavy reliance on AI tools performed worse on critical thinking assessments compared to those who used these tools less frequently."
      },
      "verification": {
        "status": "verified",
        "method": "full_quote",
        "coverage_pct": null,
        "fetch_mode": "live",
        "credibility": {
          "domain": "psypost.org",
          "source_type": "unknown",
          "tier": 2,
          "flags": [],
          "note": "Unclassified domain \u2014 verify source authority manually"
        }
      },
      "extraction": {
        "value": "verified",
        "value_in_quote": true,
        "quote_snippet": "Participants who reported heavy reliance on AI tools performed worse on critical"
      }
    },
    "B2": {
      "type": "empirical",
      "label": "Lee et al. (2025, CHI): Higher confidence in GenAI associated with less critical thinking in 319 knowledge workers",
      "sub_claim": null,
      "source": {
        "name": "Microsoft Research \u2014 Lee et al. (2025), CHI 2025",
        "url": "https://www.microsoft.com/en-us/research/publication/the-impact-of-generative-ai-on-critical-thinking-self-reported-reductions-in-cognitive-effort-and-confidence-effects-from-a-survey-of-knowledge-workers/",
        "quote": "Higher confidence in GenAI is associated with less critical thinking, while higher self-confidence is associated with more critical thinking."
      },
      "verification": {
        "status": "verified",
        "method": "full_quote",
        "coverage_pct": null,
        "fetch_mode": "live",
        "credibility": {
          "domain": "microsoft.com",
          "source_type": "unknown",
          "tier": 2,
          "flags": [],
          "note": "Unclassified domain \u2014 verify source authority manually"
        }
      },
      "extraction": {
        "value": "verified",
        "value_in_quote": true,
        "quote_snippet": "Higher confidence in GenAI is associated with less critical thinking, while high"
      }
    },
    "B3": {
      "type": "empirical",
      "label": "Harvard Gazette (2025): Harvard faculty experts warn AI use undercuts critical thinking",
      "sub_claim": null,
      "source": {
        "name": "Harvard Gazette (2025)",
        "url": "https://news.harvard.edu/gazette/story/2025/11/is-ai-dulling-our-minds/",
        "quote": "I am very worried about the effects of general-use LLMs on critical reasoning skills"
      },
      "verification": {
        "status": "verified",
        "method": "full_quote",
        "coverage_pct": null,
        "fetch_mode": "live",
        "credibility": {
          "domain": "harvard.edu",
          "source_type": "academic",
          "tier": 4,
          "flags": [],
          "note": "Academic domain (.edu)"
        }
      },
      "extraction": {
        "value": "verified",
        "value_in_quote": true,
        "quote_snippet": "I am very worried about the effects of general-use LLMs on critical reasoning sk"
      }
    },
    "B4": {
      "type": "empirical",
      "label": "Jose et al. (2025, PMC): ChatGPT users scored 17% lower on concept understanding despite solving 48% more problems",
      "sub_claim": null,
      "source": {
        "name": "Jose et al. (2025), Frontiers \u2014 PMC",
        "url": "https://pmc.ncbi.nlm.nih.gov/articles/PMC12036037/",
        "quote": "Excessive reliance may reduce cognitive engagement and long-term retention"
      },
      "verification": {
        "status": "verified",
        "method": "full_quote",
        "coverage_pct": null,
        "fetch_mode": "live",
        "credibility": {
          "domain": "nih.gov",
          "source_type": "government",
          "tier": 5,
          "flags": [],
          "note": "Government domain (.gov)"
        }
      },
      "extraction": {
        "value": "verified",
        "value_in_quote": true,
        "quote_snippet": "Excessive reliance may reduce cognitive engagement and long-term retention"
      }
    },
    "A1": {
      "type": "computed",
      "label": "Verified source count meets threshold",
      "sub_claim": null,
      "method": "count(verified citations) = 4",
      "result": "4",
      "depends_on": []
    }
  },
  "cross_checks": [
    {
      "description": "Multiple independent sources consulted",
      "n_sources_consulted": 4,
      "n_sources_verified": 4,
      "sources": {
        "gerlich_2025": "verified",
        "lee_chi_2025": "verified",
        "harvard_gazette_2025": "verified",
        "pmc_cognitive_paradox": "verified"
      },
      "independence_note": "Sources are from different institutions and research teams: (1) SBS Swiss Business School via PsyPost, (2) Microsoft Research via CHI 2025, (3) Harvard University via Harvard Gazette, (4) Multiple Indian universities via PMC/Frontiers. No two sources share authors or datasets.",
      "fact_ids": []
    }
  ],
  "adversarial_checks": [
    {
      "question": "Do any studies show AI tools IMPROVE critical thinking or problem-solving?",
      "verification_performed": "Searched for 'AI tools improve critical thinking enhance problem solving evidence study 2025 2026'. Found that AI-powered classrooms can improve learning outcomes by 23-35% in STEM disciplines and language learning. Stanford research showed a 15% increase in scores for students using AI platforms. However, these gains are in knowledge acquisition, not in independent critical thinking or problem-solving ability. The PMC cognitive paradox paper itself notes that ChatGPT users solved 48% more problems but scored 17% lower on concept understanding \u2014 showing AI helps with task completion but may impair deeper cognitive engagement.",
      "finding": "AI tools can improve task performance and learning outcomes, but these benefits are distinct from critical thinking and independent problem-solving. The evidence consistently shows that while AI boosts productivity, it may simultaneously reduce the depth of cognitive engagement required for critical thinking.",
      "breaks_proof": false
    },
    {
      "question": "Are the effects task-dependent rather than universal?",
      "verification_performed": "Searched for Microsoft CHI 2025 findings on task-dependent effects. The Lee et al. study found that for high-stakes tasks requiring accuracy, workers expend MORE effort in critical thinking with AI. For routine, low-stakes tasks under time pressure, they report LESS critical thinking effort. This shows the effect is moderated by task stakes and user confidence, not universal.",
      "finding": "The cognitive decline effect is moderated by task stakes, user confidence, and usage patterns. This does not break the proof because: (1) the claim is supported by the overall pattern across multiple studies, (2) the operator_note explicitly acknowledges this nuance, and (3) even task-dependent effects confirm that AI usage CAN and DOES reduce critical thinking under common conditions (routine tasks, high AI confidence). The proof documents this important qualification.",
      "breaks_proof": false
    },
    {
      "question": "Has the key Gerlich (2025) study been retracted or significantly corrected?",
      "verification_performed": "Searched for 'Gerlich 2025 AI Tools in Society correction retraction'. Found a correction notice (Societies 2025, 15(9), 252) published September 2025. The correction addressed a duplicated table (Table 4 was a duplicate of Table 3). The author states the scientific conclusions are unaffected, and the correction was approved by the Academic Editor.",
      "finding": "The correction was minor (table duplication) and does not affect the study's findings or conclusions about the negative correlation between AI usage and critical thinking.",
      "breaks_proof": false
    }
  ],
  "verdict": {
    "value": "PROVED",
    "qualified": false,
    "qualifier": null,
    "reason": null
  },
  "key_results": {
    "n_confirmed": 4,
    "threshold": 3,
    "operator": ">=",
    "claim_holds": true
  },
  "generator": {
    "name": "proof-engine",
    "version": "1.2.0",
    "repo": "https://github.com/yaniv-golan/proof-engine",
    "generated_at": "2026-03-29"
  },
  "proof_py_url": "/proofs/using-ai-tools-makes-humans-worse-at-critical-thin/proof.py",
  "citation": {
    "doi": "10.5281/zenodo.19455692",
    "concept_doi": "10.5281/zenodo.19454400",
    "url": "https://proofengine.info/proofs/using-ai-tools-makes-humans-worse-at-critical-thin/",
    "author": "Proof Engine",
    "cite_bib_url": "/proofs/using-ai-tools-makes-humans-worse-at-critical-thin/cite.bib",
    "cite_ris_url": "/proofs/using-ai-tools-makes-humans-worse-at-critical-thin/cite.ris"
  },
  "depends_on": []
}