<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Public Health Surveill</journal-id><journal-id journal-id-type="publisher-id">publichealth</journal-id><journal-id journal-id-type="index">9</journal-id><journal-title>JMIR Public Health and Surveillance</journal-title><abbrev-journal-title>JMIR Public Health Surveill</abbrev-journal-title><issn pub-type="epub">2369-2960</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v12i1e88470</article-id><article-id pub-id-type="doi">10.2196/88470</article-id><article-categories><subj-group subj-group-type="heading"><subject>Viewpoint</subject></subj-group></article-categories><title-group><article-title>Integrating AI Into Governmental Public Health Decision Making: Challenges, Considerations, and a Path Forward</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Campbell</surname><given-names>Elizabeth</given-names></name><degrees>MS, MScPH, PhD</degrees><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Oyefolu</surname><given-names>Oluremilekun</given-names></name><degrees>MD, MPH</degrees><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Gillani</surname><given-names>Sarah</given-names></name><degrees>MPH</degrees><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name 
name-style="western"><surname>Goodtree</surname><given-names>Hannah</given-names></name><degrees>MPH</degrees><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Kelly</surname><given-names>Alison</given-names></name><degrees>MPIA</degrees><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Rivers</surname><given-names>Caitlin</given-names></name><degrees>MPH, PhD</degrees><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Watson</surname><given-names>Crystal</given-names></name><degrees>MPH, DrPH</degrees><xref ref-type="aff" rid="aff1"/></contrib></contrib-group><aff id="aff1"><institution>Johns Hopkins Center for Outbreak Response Innovation</institution><addr-line>700 East Pratt Street, Suite 900</addr-line><addr-line>Baltimore</addr-line><addr-line>MD</addr-line><country>United States</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Mavragani</surname><given-names>Amaryllis</given-names></name></contrib><contrib contrib-type="editor"><name name-style="western"><surname>Sanchez</surname><given-names>Travis</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Akyuz</surname><given-names>Mert</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Kakraba</surname><given-names>Samuel</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Elizabeth Campbell, MS, MScPH, PhD, Johns Hopkins Center for Outbreak Response Innovation, 700 East Pratt Street, Suite 900, Baltimore, MD, 21202, United States, 1 6097524672; <email>ecampb18@jhu.edu</email></corresp></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date 
pub-type="epub"><day>27</day><month>4</month><year>2026</year></pub-date><volume>12</volume><elocation-id>e88470</elocation-id><history><date date-type="received"><day>25</day><month>11</month><year>2025</year></date><date date-type="rev-recd"><day>16</day><month>03</month><year>2026</year></date><date date-type="accepted"><day>20</day><month>03</month><year>2026</year></date></history><copyright-statement>&#x00A9; Elizabeth Campbell, Oluremilekun Oyefolu, Sarah Gillani, Hannah Goodtree, Alison Kelly, Caitlin Rivers, Crystal Watson. Originally published in JMIR Public Health and Surveillance (<ext-link ext-link-type="uri" xlink:href="https://publichealth.jmir.org">https://publichealth.jmir.org</ext-link>), 27.4.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Public Health and Surveillance, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://publichealth.jmir.org">https://publichealth.jmir.org</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://publichealth.jmir.org/2026/1/e88470"/><abstract><p>Public health emergencies such as pandemics, natural disasters, and epidemics may require rapid, high-stakes decisions often made by elected officials with limited public health training. 
Artificial intelligence (AI) holds significant promise to enhance the quality, transparency, and timeliness of governmental decision-making during such crises. This paper examines the potential of AI as a decision-support tool for elected officials while identifying key technical, logistical, ethical, and policy challenges. Technical considerations include model accuracy, data representativeness, and privacy protection, while ethical imperatives center on fairness, transparency, and accountability to prevent amplification of existing health disparities. The paper further explores workforce development needs, emphasizing AI literacy and cross-sector collaboration to enable informed use of AI insights. This viewpoint presents a novel AI Decision Support Lifecycle framework specifically designed for governmental public health emergency response, mapping six phases from problem definition through post-emergency evaluation. We provide stakeholder-specific recommendations for model developers, health agencies, and elected officials, and illustrate practical application through a detailed case example and use cases. Drawing on empirical evidence regarding digital health technologies and AI governance, we emphasize that technology deployment alone is insufficient. Successful implementation requires complementary investments in organizational capacity, data infrastructure, workforce training, community engagement, and continuous evaluation. AI integration also requires robust governance frameworks, continuous model evaluation, and alignment with existing crisis management structures. Policy recommendations highlight the importance of ethical AI frameworks, risk assessments, and public engagement to foster trust. 
Ultimately, AI can strengthen public health decision-making if developed and implemented responsibly within transparent and equitable systems.</p></abstract><kwd-group><kwd>public health</kwd><kwd>elected officials</kwd><kwd>governance</kwd><kwd>AI</kwd><kwd>artificial intelligence</kwd><kwd>decision making</kwd><kwd>public health emergency response</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Public health emergencies, such as the COVID-19 pandemic, the 2014&#x2010;2016 West Africa Ebola epidemic, and natural disasters such as floods or hurricanes, pose serious and pressing individual and population health threats. Depending on the size and scope of these emergencies, they can require urgent and harmonized responses from governmental decision makers and public health systems at the local/tribal, state/territorial, and even national levels [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. Sometimes, when an emergency is of significant size and scope, or is highly visible or consequential, decisions that are typically made by public health or emergency management experts shift to, or at least involve, elected officials, such as state governors [<xref ref-type="bibr" rid="ref3">3</xref>]. This shift might be made because elected officials are politically responsible for the outcome of a response, or the shift might happen because only elected officials can exercise the legal powers necessary to respond. 
For example, in all states and territories, governors can issue emergency declarations to enact measures such as accessing and reallocating funds or suspending laws and regulations that may impede response efforts [<xref ref-type="bibr" rid="ref4">4</xref>-<xref ref-type="bibr" rid="ref6">6</xref>].</p><p>Regardless of the inciting event, when elected officials or other leaders are thrust into a public health decision-making role, often without formal public health or crisis response training, they may need additional support to ensure that decisions are well-considered. While public health and emergency management experts can, and should, play a significant supporting role in decision-making, leaders without good decision-making processes in place can easily fall victim to so-called decision-derailers&#x2014;common biases, traps, and heuristics that can make decisions less-than-sound [<xref ref-type="bibr" rid="ref7">7</xref>].</p></sec><sec id="s2"><title>Challenges to Decision Making During Public Health Emergencies</title><p>A hallmark of public health emergencies is an initial severe information deficit, while many competing voices clamor for urgent action. There is also inevitably a tug-of-war between health and safety priorities and other societal imperatives such as economic stability. 
Further, the broad categories of &#x201C;health,&#x201D; &#x201C;economic,&#x201D; and &#x201C;societal&#x201D; impacts must themselves be unpacked; choices about how to respond, what measures and recommendations should be put in place, must consider and involve diverse populations who are affected in very different ways and to different degrees.</p><p>So, what can leaders do to ensure that their decision-making process has systematically identified and considered a full range of actions that could effectively address fundamental goals; carefully considered and balanced all the values at stake; incorporated the information available while hedging against the remaining uncertainties; and has built public support through its transparency and thoroughness?</p></sec><sec id="s3"><title>Operationalizing Effective Decision Making During Public Health Emergencies</title><p>First, policymakers should clearly define, justify, and articulate the strategic goals that they aim to achieve and then share this information with the public. Next, it is critical to identify what principles leaders want to uphold in an emergency response. Protecting the health of the public is a general principle that most leaders identify with. Are there other important principles that should not be forgotten, like ensuring the consequences of interventions are fair across populations? As decisions are not made in a vacuum, the voices of diverse experts help to consider a variety of angles to a problem.</p><p>Once the goals, principles, and structure for decision-making are in place, an assessment process for identifying and evaluating options for action is needed. This process involves evaluating the possible downstream impacts of options (both positive and negative), comparing options against one another, and assessing the logistical feasibility of the range of options. 
Finally, in addition to a continual updating as new evidence and information emerges, well-documented communication practices highlight the importance of telling the public that information may change and foreshadowing how new information can change plans.</p><p>In a crisis, these structures and processes are very difficult to implement, especially under time pressure and without prior planning. Developing plans ahead of a crisis is optimal, but limited time and resources make this rare, and leaders will still face an information deficit difficult to overcome with legacy approaches to data gathering.</p></sec><sec id="s4"><title>AI for Public Health Decision-Making</title><p>Artificial intelligence holds considerable promise to help decision-makers gather data to inform decisions and implement decision-making processes to systematically consider options and undertake tradeoff analyses. Ultimately, AI has the potential to greatly enhance the quality of public health decision-making in these emergencies. However, while the potential for decision support is significant, numerous challenges persist in developing and operationalizing AI for public health crisis decision making. While digital health technologies and information and communication technology (ICT) infrastructure have expanded rapidly, empirical evidence regarding their impact on population health outcomes remains mixed [<xref ref-type="bibr" rid="ref8">8</xref>-<xref ref-type="bibr" rid="ref12">12</xref>]. Studies show that ICT adoption does not automatically translate into measurable health improvements, particularly when institutional capacity, governance quality, workforce skills, and implementation fidelity are insufficient. 
Previous digital health initiatives often showed weak or context-dependent effects due to inadequate organizational readiness, data quality issues, and a lack of complementary investments in regulatory frameworks and human capital.</p><p>The following viewpoint considers technical, logistical, and ethical challenges in developing AI tools to support elected officials in public health decision making and describes important considerations when addressing AI applications to optimize system use in the public health domain.</p><sec id="s4-1"><title>Aim and Scope</title><p>This viewpoint article aims to: (1) examine the potential role of AI as a decision-support tool for elected officials during public health emergencies and (2) identify key technical, ethical, and governance challenges that must be addressed.</p><p>This viewpoint focuses specifically on US federal, state, and local governmental public health decision-making contexts. While many principles may apply internationally, our discussion of policy frameworks, legal requirements, and organizational structures reflects the US governance system. 
Jurisdictions with different regulatory environments, data infrastructure capacity, or political structures may face distinct challenges requiring adaptation of these recommendations.</p></sec><sec id="s4-2"><title>Target Audience</title><p>This viewpoint is written for three primary audiences: (1) state and local public health leaders and emergency management officials who must integrate AI tools into decision-making workflows; (2) AI developers and data scientists working with governmental agencies on public health applications; and (3) elected officials and policymakers who oversee public health emergency response and must understand both the potential benefits and risks of AI-supported decision making.</p></sec><sec id="s4-3"><title>Methodological Approach</title><p>This is a viewpoint article that synthesizes insights from the emerging interdisciplinary literature on AI in public health, governmental AI governance, and algorithmic fairness. Literature was identified through targeted searches of key databases (PubMed, Web of Science, OAISTER) and policy sources (federal and state government websites, Office of Management and Budget [OMB] guidance, state legislative databases), focusing on AI applications in governmental public health decision-making, with particular attention to recent publications (2020&#x2010;2025) addressing COVID-19 response and other public health emergencies.</p><p>While existing AI governance frameworks (eg, the National Institute of Standards and Technology (NIST) AI Risk Management Framework, Organisation for Economic Co-operation and Development (OECD) AI Principles) offer domain-general guidance, and crisis management models address emergency decision structures, neither adequately addresses the intersection of AI-specific technical challenges and the distinctive governance realities of public health emergency response [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref13">13</xref>]. 
This includes the shift of decision authority to elected officials with limited technical training, political accountability pressures, and the need for real-time model adaptation. The AI Decision Support Lifecycle framework presented here is designed to fill this gap by providing phase-specific, stakeholder-differentiated guidance tailored to this context.</p></sec></sec><sec id="s5"><title>Technical Challenges in Model Development</title><p>Machine learning (ML) models that support public health decision-making are subject to technical challenges in their development pipeline prior to being scaled for public health response efforts. Underfitting and overfitting are two of the most common challenges in the model development process, which limit a model&#x2019;s utility. Models are sensitive to quality and representativeness in the data used to train them.</p><p>Overfitting occurs when a model becomes too specialized in its training data and fails to perform well on new, unseen data. Overfitting limits a model&#x2019;s generalizability on test or real-world data and leads to high variance in model performance based on changes to input data. For example, a forecasting model trained on historical influenza data that captures every nuance of past flu seasons may perform poorly during a novel pandemic like COVID-19, where transmission dynamics, public behavior, and health care utilization patterns differ substantially from historical precedent. Overfitting may be addressed by collecting more data to create a more robust training set, reducing the model parameters or undertaking feature selection, and using cross-validation. Underfitting occurs when a model is too simplistic and fails to capture underlying patterns in data. This leads to missed patterns and inaccurate predictions, even for training data. 
In emergency contexts, this might manifest as a basic model that predicts hospital admissions using only a single variable (eg, previous day&#x2019;s case count) while ignoring critical factors like vaccination rates, demographic composition, or seasonal patterns. Such models provide unreliable guidance and may lead decision-makers to underestimate resource needs. Increasing the number of model parameters or features and reducing regularization may help to reduce underfitting.</p></sec><sec id="s6"><title>Ethical Considerations for AI in Public Health</title><sec id="s6-1"><title>Bias and Fairness in AI Algorithms for Public Health Decision Making</title><p>Ensuring algorithmic fairness and minimizing potential algorithmic biases is a key goal for successfully implementing AI into public health decision-making processes. Data used for developing AI models for public health practice may come from a variety of sources, including electronic health records, governmental data on demographic, socioeconomic, and environmental indicators, social media, and genomic data. These models are trained on vast amounts of data, which may inadvertently contain bias. Without actively and adequately addressing these biases, AI models for public health decision making may amplify and perpetuate existing health disparities.</p><p>Bias in public health AI models may come from a variety of sources. If training data is not representative of all subpopulations (eg, age, race/ethnicity, gender), this leads to sampling bias and may cause inaccurate model predictions for members of underrepresented groups [<xref ref-type="bibr" rid="ref14">14</xref>-<xref ref-type="bibr" rid="ref16">16</xref>]. Furthermore, AI models trained on past health care data may learn historical biases embedded in these data [<xref ref-type="bibr" rid="ref17">17</xref>]. 
For example, Obermeyer et al identified bias in model performance against Black patients for a commercial algorithm used to predict population health risk. The model used health care costs as a proxy for health (ie, lower health care costs equating to better health). However, in the US health care system, less money is spent caring for Black patients compared to White patients, and Black patients are less likely to seek out and obtain medical care [<xref ref-type="bibr" rid="ref18">18</xref>]. Health care data is also subject to measurement bias, which occurs when there are systemic errors in how health outcomes are measured and recorded (eg, misdiagnosis or underreporting) [<xref ref-type="bibr" rid="ref19">19</xref>]. Finally, bias may be introduced algorithmically (eg, feature selection bias) or in model design processes [<xref ref-type="bibr" rid="ref20">20</xref>-<xref ref-type="bibr" rid="ref22">22</xref>]. AI algorithms also often work as black boxes, which further complicates their deployment in public health decision making efforts since it is difficult for both government officials and constituents to understand how a decision was reached. This lack of transparency and explainability can reduce trust in AI-driven policies, cause concerns about accountability, and create complex ethical and legal challenges should AI-driven recommendations lead to harm [<xref ref-type="bibr" rid="ref23">23</xref>]. Should AI solutions produce incorrect, harmful, or misleading recommendations, this leads to public mistrust and decreased likelihood of buy-in for public health measures that require community-wide support (eg, social distancing).</p></sec><sec id="s6-2"><title>Equity in Practice</title><p>To mitigate some of these potential risks, organizations responsible for developing and scaling public health AI models and governmental decision makers may undertake several actions. 
First, model designers should carefully curate large, representative datasets that reflect the diverse populations that public health decisions will impact. When models are developed, it is also critical that designers assess models for performance disparities, identify fairness metrics, and undertake careful documentation of model development processes to ensure transparency and accountability. Furthermore, it is essential that decision makers receive thorough training so that they are able to understand and interpret AI system recommendations to promote trust, accountability, and transparency. To operationalize equity in public health AI systems, organizations must implement concrete safeguards throughout the AI lifecycle, including: establishing compensated community advisory boards with decision-making authority from historically marginalized populations; engaging diverse stakeholders through participatory design processes to identify potential harms and validate outputs; conducting formal equity impact assessments before and during deployment with transparent public reporting; evaluating and reporting model performance metrics separately for demographic and geographic subgroups (race/ethnicity, age, gender, socioeconomic status, disability, language, geography) with minimum performance thresholds for all groups rather than population averages alone; developing culturally appropriate communication strategies that provide plain language, multilingual explanations of AI use through trusted community channels with clear feedback and recourse mechanisms; and implementing continuous bias auditing through both internal reviews and independent third-party assessments with public documentation of findings and remediation steps. 
Finally, model development teams and government organizations should work together to regularly monitor and evaluate AI models to identify and correct potential biases, update and retrain models as new data becomes available, and ensure that models continue to adhere to existing legal and ethical standards [<xref ref-type="bibr" rid="ref24">24</xref>].</p></sec><sec id="s6-3"><title>Privacy and Data Security</title><p>AI and ML programs and utilization can both enhance data security or generate issues related to privacy, confidentiality breaches, and data security [<xref ref-type="bibr" rid="ref25">25</xref>]. There are opportunities for both malicious and accidental incidents that can result in confidence reduction, misclassification of data, or misclassification of outputs, all with the potential for severe downstream effects [<xref ref-type="bibr" rid="ref26">26</xref>]. AI and ML models require the collection, storage, and analysis of vast quantities of data, including health data, personal identifying information, and confidential information. Security risks are a huge concern for these models and should be a priority when developing and using AI or ML tools. Opportunities for data breaches or loss of security could occur at an individual level, including someone with authorized access using data maliciously, it could occur accidentally through non-secure networks, or it could occur through organized and intentional breaches into private systems [<xref ref-type="bibr" rid="ref27">27</xref>].</p><p>The vast number of systems that may interact with an individual&#x2019;s personal data also increases the risk of privacy concerns, since more systems translates to more opportunities for risk [<xref ref-type="bibr" rid="ref28">28</xref>]. It is essential that developers, researchers, and institutions develop, test, and continue to improve and invest in safeguards for data access, use, and control. 
These safeguards can include privacy risk mitigation strategies and privacy-enhancing technologies [<xref ref-type="bibr" rid="ref29">29</xref>], including de-identifying data, safeguarding physical access to systems, and strong encryption and protection of cloud-based systems [<xref ref-type="bibr" rid="ref28">28</xref>]. There are also ethical considerations for inadequate data privacy protections, including data breaches or unauthorized access. Individuals who have personal or confidential data used in these models should have provided fully informed consent [<xref ref-type="bibr" rid="ref30">30</xref>], be updated in the event of a security compromise, and have the option to withdraw their data from the model. There are ongoing discussions on data ownership and profit from individual data as well [<xref ref-type="bibr" rid="ref31">31</xref>]. Finally, regulatory bodies should exist that can enforce privacy protections and provide recourse when entities fail to protect sensitive data or violate consent for data use [<xref ref-type="bibr" rid="ref30">30</xref>].</p></sec></sec><sec id="s7"><title>Implementation: Organizational Readiness</title><sec id="s7-1"><title>Integrating AI Into Education and Government Workforce Development</title><p>Successful AI deployment in the public sector hinges not only on technological capabilities but equally on organizational readiness and workforce expertise. Evidence from previous digital health initiatives demonstrates that technology alone is insufficient; effective implementation requires complementary investments in organizational capacity, data infrastructure, and workforce skills [<xref ref-type="bibr" rid="ref8">8</xref>-<xref ref-type="bibr" rid="ref12">12</xref>].</p><p>The G7 Toolkit for Artificial Intelligence in the Public Sector identifies workforce digital skills as a foundational requirement for effective and responsible AI deployment in government contexts. 
A multi-layered approach is essential to building this capacity. Foundational AI literacy programs can equip public officials with an understanding of both AI&#x2019;s potential and its inherent limitations. Expert workshops utilizing scenario-based exercises represent another valuable resource that could help decision-makers navigate the complex trade-offs between privacy protection, equity considerations, and operational efficiency. Historical analysis also offers great learning opportunities. By examining case studies demonstrating predictive disease surveillance and resource allocation during past emergencies, officials can visualize concrete applications of AI and develop strategies to manage implementation challenges as they arise.</p></sec><sec id="s7-2"><title>Considerations for Implementation Into Workflows, Policy Formulation, Program Planning, and Resource Allocation</title><p>A successful deployment of artificial intelligence during public health emergencies extends well beyond technological considerations, requiring robust institutional frameworks and operational adaptations. Historically, expert consensus functioned as a governance mechanism through which policy alternatives were prioritized. The absence of a standardized governance framework for AI implementation may create significant hesitation among elected officials to support AI-derived recommendations, particularly in situations when actions are under public scrutiny [<xref ref-type="bibr" rid="ref32">32</xref>].</p><p>AI integration into public health decision-making would also require a seamless alignment with existing processes and adaptive governance structures. The fragmentation of data across multiple stakeholders and information systems could result in delayed or incompatible information transfer, compromising model accuracy when rapid analytical insights are most critical [<xref ref-type="bibr" rid="ref33">33</xref>]. 
At the operational level, AI outputs must be integrated directly into existing crisis-management dashboards and consoles so that public health officials can obtain timely and actionable insights without redundant data entry or additional interfaces [<xref ref-type="bibr" rid="ref34">34</xref>].</p><p>The implementation of AI tools during a public health emergency involves significant resource considerations beyond software licensing and hardware acquisition. Investments in data management infrastructure, cybersecurity protections, and dedicated personnel are also essential. Additionally, ongoing model retraining and impact assessments are required to maintain relevance as conditions evolve [<xref ref-type="bibr" rid="ref35">35</xref>]. The resource allocation challenge is further complicated in emergency settings, where bandwidth and computational resources face competition from numerous concurrent priorities such as other critical emergency response functions.</p><p>Artificial intelligence systems can function as decision support mechanisms that enhance rather than supplant the judgment of elected officials and their technical advisors when designed with appropriate architectural safeguards. 
The safeguard features could include human validation functions that allow flagging specific output for expert review at key decision points [<xref ref-type="bibr" rid="ref36">36</xref>], an iterative feedback loop function to allow officials to log disagreements with model outputs and feed these back into model retraining [<xref ref-type="bibr" rid="ref33">33</xref>], an ensemble function that can combine model predictions with expert systems to ensure known constraints are reinforced alongside data-driven insights [<xref ref-type="bibr" rid="ref37">37</xref>].</p><p><bold>Case Study:</bold> To illustrate how these implementation considerations manifest in practice, we present a detailed case example:</p></sec><sec id="s7-3"><title>Hospital Surge Prediction During Influenza Season</title><p>A state health department seeks to deploy an AI forecasting model to predict hospital bed needs 2&#x2010;4 weeks ahead during the influenza season, allowing proactive resource mobilization.</p><p><italic>Problem Definition</italic>: The health department, in consultation with hospital associations and emergency management, identifies specific decision needs: estimating intensive care unit (ICU) bed demand, identifying which geographic regions will face capacity constraints, and determining timing for activating surge capacity plans.</p><p><italic>Model Development</italic>: Data scientists develop an ensemble forecasting model using historical hospitalization data, influenza surveillance data, and demographic information. The model undergoes rigorous validation using holdout data from previous flu seasons, with performance assessed overall and for relevant subgroups (age categories, urban vs rural regions).</p><p><italic>Integration</italic>: The model is integrated into the state&#x2019;s existing emergency operations center dashboard, providing daily updated forecasts with clear uncertainty ranges. 
Training is provided to epidemiologists and emergency coordinators on interpreting forecasts and understanding uncertainty.</p><p><italic>Deployment</italic>: During flu season, forecasts inform daily situation reports to the governor&#x2019;s office. When the model predicts ICU capacity concerns in the jurisdiction&#x2019;s northern region, officials activate surge plans, coordinate patient transfers, and deploy mobile medical units. Importantly, epidemiologists review each forecast before distribution, flagging any predictions that seem inconsistent with other data sources.</p><p><italic>Failure Mode</italic>: Mid-season, a new influenza strain emerges with higher severity. The model, trained on previous seasons&#x2019; severity patterns, initially underestimates hospitalization demand. Human validation catches this discrepancy within 48 hours, prompting manual adjustments and communication to decision-makers about model limitations.</p><p><italic>Mitigation</italic>: The incident illustrates the importance of human oversight, ensemble modeling (the team supplements the AI forecast with expert judgment and alternative models), and clear uncertainty communication (decision-makers understand that forecasts carry substantial uncertainty during novel epidemiological situations).</p></sec></sec><sec id="s8"><title>Evaluation and Continuous Learning</title><p>Systematic evaluation is essential for responsible AI deployment in public health emergencies. Agencies should conduct pre-deployment testing through prospective validation using historical emergency data, simulation exercises, and tabletop drills to assess accuracy, identify failure modes, and evaluate integration with decision workflows before operational use. Performance benchmarking should establish baseline accuracy metrics, track performance over time, and, where possible, compare AI-supported versus non-AI-supported decision outcomes to assess actual impact on decision quality. 
Real-time monitoring requires dashboards that track model performance during deployment with automated alerts for degradation, supplemented by regular expert review to identify issues not captured by automated metrics. Agencies must establish adverse event reporting mechanisms similar to health care systems, with clear protocols for investigating suspected AI-related errors or harms and implementing corrective actions. Post-emergency after-action reviews should systematically assess whether AI tools were useful, what worked well, what failed, how tools should be modified, and whether unintended consequences or equity impacts occurred. Finally, agencies should define clear decommissioning criteria and transition plans for retiring models when they exhibit poor performance, conditions change, or better alternatives become available.</p></sec><sec id="s9"><title>Policy Implications of AI in Public Health Decision Making</title><p>State-level legislative activity around governmental AI use has accelerated rapidly, with more than 150 bills introduced in 2024 and at least 30 states issuing formal guidance on AI use by state agencies. For example, Connecticut has implemented the AI Responsible Use Framework (Policy AI-01), which promotes ethical AI use, fairness, privacy, and transparency across all state agencies. Maryland enacted the Artificial Intelligence Governance Act of 2024, requiring each unit of state government to conduct inventories and assessments of systems that employ high-risk AI, and providing for the Department of Information Technology to develop policies and procedures concerning the development, procurement, deployment, use, and assessment of such systems [<xref ref-type="bibr" rid="ref38">38</xref>]. 
Vermont established the Division of Artificial Intelligence within the Agency of Digital Services to review all aspects of AI systems developed, employed, or procured in state government [<xref ref-type="bibr" rid="ref38">38</xref>].</p><p>At the federal level, several policies regulate the use of AI in governmental decision-making. The AI in Government Act of 2020 facilitates the adoption of AI technologies in the federal government, aiming to improve cohesion and competency in the adoption and use of AI within federal agencies. Executive Order 13960, titled &#x201C;Promoting the Use of Trustworthy Artificial Intelligence in the Federal Government,&#x201D; establishes the policy of the United States to promote the innovation and use of AI to improve government operations and services in a manner that fosters public trust and confidence while protecting privacy, civil rights, civil liberties, and American values. Additionally, the Office of Management and Budget (OMB) issued Memorandum M-25&#x2010;21, &#x201C;Accelerating Federal Use of AI through Innovation, Governance, and Public Trust,&#x201D; which provides guidance for federal agencies to enhance their AI governance and risk management practices [<xref ref-type="bibr" rid="ref39">39</xref>-<xref ref-type="bibr" rid="ref41">41</xref>]. More recently, Executive Order 14319 (July 2025), titled &#x201C;Preventing Woke AI in the Federal Government,&#x201D; required federal agencies to procure only AI models that prioritize truth-seeking and ideological neutrality, restricting the incorporation of diversity, equity, and inclusion frameworks into AI outputs [<xref ref-type="bibr" rid="ref42">42</xref>]. This policy places constraints on public health AI decision-making by limiting the use of equity-informed analytic approaches that are commonly used to identify health disparities and guide targeted interventions. 
In December 2025, Executive Order 14365, titled &#x201C;Ensuring a National Policy Framework for Artificial Intelligence,&#x201D; established a uniform national AI framework intended to preempt state regulations deemed burdensome to innovation. By authorizing federal challenges to inconsistent state laws and allowing the withholding of federal funding from non-compliant states, the order reshapes public health AI governance by centralizing oversight, potentially reducing states&#x2019; ability to tailor AI safeguards, transparency standards, and equity protections to local public health needs [<xref ref-type="bibr" rid="ref43">43</xref>].</p><p>To strengthen AI regulations in government, several policies should be implemented. Governments should establish ethical AI frameworks that operationalize the fairness and bias mitigation principles discussed earlier, with continuous assessments to identify biases, ensure privacy protection, and evaluate AI system reliability. More states should implement policies like those of Connecticut, Maryland, and Vermont to regulate AI use within state agencies to ensure the ethical use of AI systems in government operations [<xref ref-type="bibr" rid="ref38">38</xref>]. Additionally, mandatory impact assessments should be conducted before deploying AI systems, ensuring that risks, including bias and discrimination, are mitigated and disclosed publicly. Governments should also require AI systems to be explainable, allowing the public to understand the decision-making logic behind AI outcomes. For instance, New York has required state agencies to publicly disclose detailed information about their automated decision systems [<xref ref-type="bibr" rid="ref44">44</xref>]. 
Public oversight should be ensured through independent governance structures, such as Chief AI Officers, who would be responsible for ensuring AI deployment aligns with ethical standards and transparency.</p><p>Building public trust in AI use requires active engagement with stakeholders, including community leaders, ethicists, and public health experts, to ensure AI applications meet the needs and values of the community. Governments must prioritize clear and accessible communication about how AI is used in decision-making, including detailed reports on AI system performance, challenges, and successes. Regular audits should be conducted to address potential biases, ensuring AI systems are fair and inclusive. Furthermore, ethical AI governance structures should be implemented, including ethics boards and external reviews, to monitor and guide responsible AI use. Creating avenues for public feedback, such as town halls or online platforms, will allow communities to voice concerns and provide input on improving AI systems. Strong privacy protections should be enacted to safeguard citizens&#x2019; data, ensuring AI systems are compliant with privacy standards. Lastly, ongoing public education and training on AI literacy will help demystify AI and foster confidence in its use, ultimately increasing public trust in governmental AI decision-making.</p><p>However, even with emerging governance frameworks, empirical evidence suggests that formal governance structures do not automatically translate into improved outcomes [<xref ref-type="bibr" rid="ref45">45</xref>-<xref ref-type="bibr" rid="ref48">48</xref>]. Cross-country studies find that standard governance indicators (government effectiveness, rule of law, transparency) show weak or inconsistent relationships with population health outcomes once other factors are controlled. 
Similarly, AI governance arrangements (such as ethical guidelines, transparency principles, and oversight bodies) do not necessarily translate into effective implementation at the operational level. This disconnect occurs because high-level institutional indicators may fail to capture sector-specific capacities essential for public health emergency response: data quality, frontline health system integration, decision-maker competence, organizational culture, and implementation fidelity. Successful AI governance requires attention to these operational realities, not just policy documents.</p></sec><sec id="s10"><title>The AI Decision Support Lifecycle for Public Health Emergencies</title><p>Drawing on the technical, ethical, organizational, and policy considerations discussed above, we propose an AI Decision Support Lifecycle framework specifically designed for governmental public health emergency response (<xref ref-type="fig" rid="figure1">Figure 1</xref>).</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>AI decision support lifecycle in public health emergencies.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="publichealth_v12i1e88470_fig01.png"/></fig><p><italic>Phase 1</italic> consists of defining the problem at hand and conducting a needs assessment. Stakeholders must clarify the decisions that will be made, the information that is required to help decision makers, the end users of AI outputs, and the consequences of wrong decisions. This phase involves engaging elected officials, public health leaders, emergency managers, and community representatives to ensure AI development targets genuine decision needs.</p><p><italic>Phase 2</italic> consists of equity-driven, technically robust model development and validation. 
Key details and steps include using representative data, evaluating performance across demographic and geographic subgroups, and documenting model limitations and uncertainty. Additional safeguards include independent expert review prior to operational deployment.</p><p><italic>Phase 3</italic> includes integrating AI models into workflows and pre-deployment testing. AI tools must be integrated into existing workflows and information systems. Steps include but are not limited to developing user interfaces compatible with incident command structures, training end users on interpretation and appropriate use, and establishing clear protocols for human validation and override of AI recommendations.</p><p>In <italic>Phase 4</italic>, a model is ready for deployment during an emergency. During an emergency, AI tools provide decision support while human officials retain ultimate authority. When AI is being deployed during an emergency, model performance must be monitored in real time, outputs must be validated through expert evaluation before high-stakes decisions, and uncertainty must be clearly communicated to decision makers.</p><p>Monitoring and adaptation comprise <italic>Phase 5</italic>, which includes tracking accuracy metrics, comparing predictions to observed outcomes, and adjusting models as new data becomes available.</p><p>Finally, in <italic>Phase 6</italic>, models are evaluated after emergency deployment, and lessons learned are gleaned. Evaluation includes understanding the model&#x2019;s usefulness for decision making, identifying what worked well and what didn&#x2019;t, as well as any unintended consequences, and understanding how the tool may be used or modified for future emergencies.</p><p>A number of critical principles apply throughout the phases of the AI Decision Support Lifecycle for Public Health Emergencies. 
First, it is vital that there are robust privacy and confidentiality protections in place and that there is limited data use for limited and specific purposes. Next, equity principles should be positioned as a key priority in model development and implementation. Similarly, there must be clear documentation for model development and use, explainability for model outputs, and public accountability for the use of AI in public health decision-making. Finally, human authority must remain the centerpiece of public health emergency decision-making. AI can only serve as decision support, never an autonomous decision-maker.</p><p><xref ref-type="table" rid="table1">Table 1</xref> organizes and synthesizes technical, ethical, organizational, and policy challenges that occur throughout the lifecycle, with specific actionable recommendations for model developers, health agencies, and elected officials.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Key challenges and stakeholder actions for AI integration in public health emergency decision-making.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Challenge area</td><td align="left" valign="bottom">Model developers</td><td align="left" valign="bottom">Health agencies &#x0026; officials</td><td align="left" valign="bottom">Policymakers &#x0026; elected officials</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="4">Technical challenges</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Model Development (underfitting, overfitting, data quality)</td><td align="left" valign="top">Ensure models are sensitive to data quality and representativeness; avoid over-specialization to training data</td><td align="left" valign="top">Work with modelers to validate models before deployment; establish validation processes for AI outputs</td><td align="left" 
valign="top">Understand model limitations; ensure AI enhances rather than supplants judgment</td></tr><tr><td align="left" valign="top" colspan="4">Ethical challenges</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Bias &#x0026; Fairness</td><td align="left" valign="top">Curate representative datasets; assess for performance disparities across subgroups; document model development transparently</td><td align="left" valign="top">Continuously evaluate models for bias; implement continuous monitoring and regular auditing of AI systems</td><td align="left" valign="top">Ensure decisions account for diverse communities; conduct regular audits for fairness and inclusiveness</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Privacy &#x0026; Data Security</td><td align="left" valign="top">Design models with privacy protections; maintain confidentiality of training and testing data</td><td align="left" valign="top">Implement robust data management infrastructure and cybersecurity protections</td><td align="left" valign="top">Enforce transparent risk management and privacy protection policies; ensure civil rights protections</td></tr><tr><td align="left" valign="top" colspan="4">Organizational challenges</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Workforce Development &#x0026; AI Literacy</td><td align="left" valign="top">Design user-friendly interfaces; provide training resources for end users</td><td align="left" valign="top">Implement foundational AI literacy programs; provide expert workshops using scenario-based training</td><td align="left" valign="top">Understand AI&#x2019;s potential and limitations; receive training to interpret and implement AI outputs</td></tr><tr><td align="left" valign="top"><named-content 
content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Workflow Integration &#x0026; Implementation</td><td align="left" valign="top">Include human validation functions, feedback loops, and ensemble functions combining model predictions with expert systems</td><td align="left" valign="top">Integrate AI outputs into crisis-management dashboards; ensure seamless alignment with existing processes</td><td align="left" valign="top">Establish governance frameworks to reduce hesitation in supporting AI-derived recommendations</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Resource Allocation &#x0026; Infrastructure</td><td align="left" valign="top">Plan for ongoing maintenance and model updates beyond initial development</td><td align="left" valign="top">Invest in data management infrastructure, cybersecurity, and dedicated personnel</td><td align="left" valign="top">Allocate resources beyond software licensing and hardware acquisition</td></tr><tr><td align="left" valign="top" colspan="4">Policy &#x0026; governance challenges</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Governance Frameworks &#x0026; Standards</td><td align="left" valign="top">Follow ethical AI principles prioritizing fairness, transparency, and accountability</td><td align="left" valign="top">Develop adaptive governance structures; implement continuous risk assessments</td><td align="left" valign="top">Establish ethical AI frameworks at federal and state levels; implement mandatory impact assessments</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Public Trust &#x0026; Stakeholder Engagement</td><td align="left" valign="top">Design transparent systems that enable public understanding of AI decision-making processes</td><td align="left" 
valign="top">Engage with community leaders, ethicists, and public health experts; provide detailed performance reports</td><td align="left" valign="top">Ensure AI applications meet community needs and values; prioritize clear communication about AI use</td></tr></tbody></table></table-wrap></sec><sec id="s11" sec-type="conclusions"><title>Conclusion</title><p>Artificial intelligence holds substantial promise for supporting governmental public health emergency response, but responsible implementation requires adherence to core ethical principles rather than simply deploying technology. First, AI must function strictly as decision-support tools while preserving human authority. Elected officials and public health leaders retain ultimate accountability for decisions, with clear protocols for validating and overriding AI recommendations. Second, equity must be designed into AI systems from inception, not treated as an afterthought, through representative data, subgroup performance evaluation, community advisory boards, and transparent equity impact reporting. Third, successful AI deployment demands comprehensive organizational readiness, including workforce capacity (data scientists working collaboratively with epidemiologists and communication specialists), robust data infrastructure, governance structures, and sustained funding beyond initial acquisition costs.</p><p>Fourth, transparency and accountability mechanisms are non-negotiable, requiring public disclosure of when AI informs decisions, comprehensive model documentation, explainable outputs, and clear processes for challenging AI-informed decisions. Finally, continuous monitoring, evaluation, and adaptation throughout the AI lifecycle ensures systems remain reliable as conditions change, with pre-deployment testing, real-time performance monitoring, adverse event reporting, and willingness to decommission underperforming models. 
Achieving AI&#x2019;s potential requires genuine collaboration between technical developers, public health agencies, and elected officials, with each stakeholder group taking specific, complementary actions to ensure models meet legal and ethical standards while meaningfully supporting evidence-based decision-making that reduces rather than exacerbates health inequities.</p></sec></body><back><notes><sec><title>Funding</title><p>The Center for Outbreak Response Innovation (CORI) is supported through Cooperative Agreement NU38FT000004 between CDC's Center for Forecasting and Outbreak Analytics and Johns Hopkins University's Bloomberg School of Public Health.</p></sec></notes><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">ICT</term><def><p>information and communication technology</p></def></def-item><def-item><term id="abb3">ML</term><def><p>machine learning</p></def></def-item><def-item><term id="abb4">OMB</term><def><p>Office of Management and Budget</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="web"><article-title>US centers for disease control and prevention</article-title><source>Public Health Emergency Preparedness and Response Capabilities</source><year>2025</year><access-date>2026-01-30</access-date><publisher-name>State Local Readiness</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://www.cdc.gov/readiness/php/capabilities/index.html">https://www.cdc.gov/readiness/php/capabilities/index.html</ext-link></comment></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nelson</surname><given-names>C</given-names> </name><name 
name-style="western"><surname>Lurie</surname><given-names>N</given-names> </name><name name-style="western"><surname>Wasserman</surname><given-names>J</given-names> </name><name name-style="western"><surname>Zakowski</surname><given-names>S</given-names> </name></person-group><article-title>Conceptualizing and defining public health emergency preparedness</article-title><source>Am J Public Health</source><year>2007</year><month>04</month><volume>97 Suppl 1</volume><issue>Suppl 1</issue><fpage>S9</fpage><lpage>11</lpage><pub-id pub-id-type="doi">10.2105/AJPH.2007.114496</pub-id><pub-id pub-id-type="medline">17413078</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hodge</surname><given-names>JG</given-names> </name><name name-style="western"><surname>Dunning</surname><given-names>LT</given-names> </name><name name-style="western"><surname>Piatt</surname><given-names>JL</given-names> </name></person-group><article-title>State public health emergency powers in response to COVID-19</article-title><source>Am J Public Health</source><year>2023</year><month>03</month><volume>113</volume><issue>3</issue><fpage>275</fpage><lpage>279</lpage><pub-id pub-id-type="doi">10.2105/AJPH.2022.307158</pub-id><pub-id pub-id-type="medline">36521072</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Orenstein</surname><given-names>DG</given-names> </name></person-group><article-title>When law is not law: setting aside legal provisions during declared emergencies</article-title><source>J Law Med Ethics</source><year>2013</year><month>03</month><volume>41 Suppl 1</volume><issue>S1</issue><fpage>73</fpage><lpage>76</lpage><pub-id pub-id-type="doi">10.1111/jlme.12044</pub-id><pub-id pub-id-type="medline">23590746</pub-id></nlm-citation></ref><ref 
id="ref5"><label>5</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Hodge</surname><given-names>JG</given-names> </name><name name-style="western"><surname>Anderson</surname><given-names>ED</given-names> </name></person-group><source>Principles and practice of legal triage during public health emergencies [Abstract]</source><year>2009</year><access-date>2025-07-10</access-date><publisher-name>Social Science Research Network</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://papers.ssrn.com/abstract=1335342">https://papers.ssrn.com/abstract=1335342</ext-link></comment></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sunshine</surname><given-names>G</given-names> </name><name name-style="western"><surname>Barrera</surname><given-names>N</given-names> </name><name name-style="western"><surname>Corcoran</surname><given-names>AJ</given-names> </name><name name-style="western"><surname>Penn</surname><given-names>M</given-names> </name></person-group><article-title>Emergency declarations for public health issues: expanding our definition of emergency</article-title><source>J Law Med Ethics</source><year>2019</year><month>06</month><volume>47</volume><issue>2_suppl</issue><fpage>95</fpage><lpage>99</lpage><pub-id pub-id-type="doi">10.1177/1073110519857328</pub-id><pub-id pub-id-type="medline">31298138</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Higgins</surname><given-names>G</given-names> </name><name name-style="western"><surname>Freedman</surname><given-names>J</given-names> </name></person-group><article-title>Improving decision making in 
crisis</article-title><source>JBCEP</source><year>2013</year><volume>7</volume><issue>1</issue><fpage>65</fpage><pub-id pub-id-type="doi">10.69554/AYCK9571</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Agarwal</surname><given-names>R</given-names> </name><name name-style="western"><surname>Gao</surname><given-names>G (Gordon</given-names> </name><name name-style="western"><surname>DesRoches</surname><given-names>C</given-names> </name><name name-style="western"><surname>Jha</surname><given-names>AK</given-names> </name></person-group><article-title><bold>Research Commentary</bold> &#x2014;The digital transformation of healthcare: current status and the road ahead</article-title><source>Information Systems Research</source><year>2010</year><month>12</month><volume>21</volume><issue>4</issue><fpage>796</fpage><lpage>809</lpage><pub-id pub-id-type="doi">10.1287/isre.1100.0327</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cline</surname><given-names>GB</given-names> </name><name name-style="western"><surname>Luiz</surname><given-names>JM</given-names> </name></person-group><article-title>Information technology systems in public sector health facilities in developing countries: the case of South Africa</article-title><source>BMC Med Inform Decis Mak</source><year>2013</year><month>01</month><day>24</day><volume>13</volume><fpage>13</fpage><pub-id pub-id-type="doi">10.1186/1472-6947-13-13</pub-id><pub-id pub-id-type="medline">23347433</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sheikh</surname><given-names>A</given-names> </name><name 
name-style="western"><surname>Sood</surname><given-names>HS</given-names> </name><name name-style="western"><surname>Bates</surname><given-names>DW</given-names> </name></person-group><article-title>Leveraging health information technology to achieve the &#x201C;triple aim&#x201D; of healthcare reform</article-title><source>J Am Med Inform Assoc</source><year>2015</year><month>07</month><volume>22</volume><issue>4</issue><fpage>849</fpage><lpage>856</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocv022</pub-id><pub-id pub-id-type="medline">25882032</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fichman</surname><given-names>RG</given-names> </name><name name-style="western"><surname>Kohli</surname><given-names>R</given-names> </name><name name-style="western"><surname>Krishnan</surname><given-names>R</given-names> </name></person-group><article-title><bold>Editorial Overview</bold> &#x2014;The role of information systems in healthcare: current research and future trends</article-title><source>Information Systems Research</source><year>2011</year><month>09</month><volume>22</volume><issue>3</issue><fpage>419</fpage><lpage>428</lpage><pub-id pub-id-type="doi">10.1287/isre.1110.0382</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Scott</surname><given-names>RE</given-names> </name><name name-style="western"><surname>Mars</surname><given-names>M</given-names> </name></person-group><article-title>Principles and framework for eHealth strategy development</article-title><source>J Med Internet Res</source><year>2013</year><month>07</month><day>30</day><volume>15</volume><issue>7</issue><fpage>e155</fpage><pub-id pub-id-type="doi">10.2196/jmir.2250</pub-id><pub-id 
pub-id-type="medline">23900066</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="report"><person-group person-group-type="author"><collab>OECD, UNESCO, G7 Italia</collab></person-group><article-title>G7 toolkit for artificial intelligence in the public sector</article-title><year>2024</year><access-date>2026-01-30</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.oecd.org/en/publications/g7-toolkit-for-artificial-intelligence-in-the-public-sector_421c1244-en.html">https://www.oecd.org/en/publications/g7-toolkit-for-artificial-intelligence-in-the-public-sector_421c1244-en.html</ext-link></comment></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>JM</given-names> </name><name name-style="western"><surname>Sarro</surname><given-names>F</given-names> </name><name name-style="western"><surname>Harman</surname><given-names>M</given-names> </name></person-group><article-title>A comprehensive empirical study of bias mitigation methods for machine learning classifiers</article-title><source>ACM Transactions on Software Engineering and Methodology</source><year>2023</year><volume>32</volume><issue>4</issue><fpage>1</fpage><lpage>30</lpage><pub-id pub-id-type="doi">10.1145/3583561</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Benjamin</surname><given-names>R</given-names> </name></person-group><article-title>Assessing risk, automating racism</article-title><source>Science</source><year>2019</year><month>10</month><day>25</day><volume>366</volume><issue>6464</issue><fpage>421</fpage><lpage>422</lpage><pub-id 
pub-id-type="doi">10.1126/science.aaz3873</pub-id><pub-id pub-id-type="medline">31649182</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mhasawade</surname><given-names>V</given-names> </name><name name-style="western"><surname>Zhao</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Chunara</surname><given-names>R</given-names> </name></person-group><article-title>Machine learning and algorithmic fairness in public and population health</article-title><source>Nat Mach Intell</source><year>2021</year><volume>3</volume><issue>8</issue><fpage>659</fpage><lpage>666</lpage><pub-id pub-id-type="doi">10.1038/s42256-021-00373-4</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McCradden</surname><given-names>MD</given-names> </name><name name-style="western"><surname>Joshi</surname><given-names>S</given-names> </name><name name-style="western"><surname>Anderson</surname><given-names>JA</given-names> </name><name name-style="western"><surname>Mazwi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Goldenberg</surname><given-names>A</given-names> </name><name name-style="western"><surname>Zlotnik Shaul</surname><given-names>R</given-names> </name></person-group><article-title>Patient safety and quality improvement: Ethical principles for a regulatory approach to bias in healthcare machine learning</article-title><source>J Am Med Inform Assoc</source><year>2020</year><month>12</month><day>9</day><volume>27</volume><issue>12</issue><fpage>2024</fpage><lpage>2027</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocaa085</pub-id><pub-id pub-id-type="medline">32585698</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation 
citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Obermeyer</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Powers</surname><given-names>B</given-names> </name><name name-style="western"><surname>Vogeli</surname><given-names>C</given-names> </name><name name-style="western"><surname>Mullainathan</surname><given-names>S</given-names> </name></person-group><source>Dissecting racial bias in an algorithm used to manage the health of populations</source><year>2019</year><access-date>2025-04-23</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.science.org/doi/10.1126/science.aax2342">https://www.science.org/doi/10.1126/science.aax2342</ext-link></comment></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Young</surname><given-names>JC</given-names> </name><name name-style="western"><surname>Conover</surname><given-names>MM</given-names> </name><name name-style="western"><surname>Funk</surname><given-names>MJ</given-names> </name></person-group><article-title>Measurement error and misclassification in electronic medical records: methods to mitigate bias</article-title><source>Curr Epidemiol Rep</source><year>2018</year><month>12</month><volume>5</volume><issue>4</issue><fpage>343</fpage><lpage>356</lpage><pub-id pub-id-type="doi">10.1007/s40471-018-0164-x</pub-id><pub-id pub-id-type="medline">35633879</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Campbell</surname><given-names>EA</given-names> </name><name name-style="western"><surname>Bose</surname><given-names>S</given-names> </name><name name-style="western"><surname>Masino</surname><given-names>AJ</given-names> </name></person-group><article-title>Conceptualizing bias 
in EHR data: A case study in performance disparities by demographic subgroups for a pediatric obesity incidence classifier</article-title><source>PLOS Digit Health</source><year>2024</year><month>10</month><volume>3</volume><issue>10</issue><fpage>e0000642</fpage><pub-id pub-id-type="doi">10.1371/journal.pdig.0000642</pub-id><pub-id pub-id-type="medline">39441784</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Andaur Navarro</surname><given-names>CL</given-names> </name><name name-style="western"><surname>Damen</surname><given-names>JAA</given-names> </name><name name-style="western"><surname>Takada</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Risk of bias in studies on prediction models developed using supervised machine learning techniques: systematic review</article-title><source>BMJ</source><year>2021</year><month>10</month><day>20</day><volume>375</volume><fpage>n2281</fpage><pub-id pub-id-type="doi">10.1136/bmj.n2281</pub-id><pub-id pub-id-type="medline">34670780</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Pathak</surname><given-names>YV</given-names> </name><name name-style="western"><surname>Saikia</surname><given-names>S</given-names> </name><name name-style="western"><surname>Pathak</surname><given-names>S</given-names> </name><name name-style="western"><surname>Patel</surname><given-names>JK</given-names> </name><name name-style="western"><surname>Prajapati</surname><given-names>JB</given-names></name></person-group><source>Ethical Issues in AI for Bioinformatics and Chemoinformatics</source><year>2023</year><publisher-name>CRC Press</publisher-name><pub-id pub-id-type="doi">10.1201/9781003353751</pub-id></nlm-citation></ref><ref 
id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>London</surname><given-names>AJ</given-names> </name></person-group><article-title>Artificial intelligence and black-box medical decisions: accuracy versus explainability</article-title><source>Hastings Cent Rep</source><year>2019</year><month>01</month><volume>49</volume><issue>1</issue><fpage>15</fpage><lpage>21</lpage><pub-id pub-id-type="doi">10.1002/hast.973</pub-id><pub-id pub-id-type="medline">30790315</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gerke</surname><given-names>S</given-names> </name><name name-style="western"><surname>Minssen</surname><given-names>T</given-names> </name><name name-style="western"><surname>Cohen</surname><given-names>G</given-names> </name></person-group><article-title>Ethical and legal challenges of artificial intelligence-driven healthcare</article-title><source>Artif Intell Healthc</source><year>2020</year><fpage>295</fpage><lpage>336</lpage><pub-id pub-id-type="doi">10.1016/B978-0-12-818438-7.00012-5</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Martin</surname><given-names>KD</given-names> </name><name name-style="western"><surname>Zimmermann</surname><given-names>J</given-names> </name></person-group><article-title>Artificial intelligence and its implications for data privacy</article-title><source>Curr Opin Psychol</source><year>2024</year><month>08</month><volume>58</volume><fpage>101829</fpage><pub-id pub-id-type="doi">10.1016/j.copsyc.2024.101829</pub-id><pub-id pub-id-type="medline">38954851</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="other"><person-group 
person-group-type="author"><name name-style="western"><surname>Oseni</surname><given-names>A</given-names> </name><name name-style="western"><surname>Moustafa</surname><given-names>N</given-names> </name><name name-style="western"><surname>Janicke</surname><given-names>H</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>P</given-names> </name><name name-style="western"><surname>Tari</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Vasilakos</surname><given-names>A</given-names> </name></person-group><article-title>Security and privacy for artificial intelligence: opportunities and challenges</article-title><source>arXiv</source><access-date>2025-04-15</access-date><comment>Preprint posted online on 2021</comment><pub-id pub-id-type="doi">10.48550/arXiv.2102.04661</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bak</surname><given-names>M</given-names> </name><name name-style="western"><surname>Madai</surname><given-names>VI</given-names> </name><name name-style="western"><surname>Fritzsche</surname><given-names>MC</given-names> </name><name name-style="western"><surname>Mayrhofer</surname><given-names>MT</given-names> </name><name name-style="western"><surname>McLennan</surname><given-names>S</given-names> </name></person-group><article-title>You can&#x2019;t have AI both ways: balancing health data privacy and access fairly</article-title><source>Front Genet</source><year>2022</year><volume>13</volume><fpage>929453</fpage><pub-id pub-id-type="doi">10.3389/fgene.2022.929453</pub-id><pub-id pub-id-type="medline">35769991</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yadav</surname><given-names>N</given-names> </name><name 
name-style="western"><surname>Pandey</surname><given-names>S</given-names> </name><name name-style="western"><surname>Gupta</surname><given-names>A</given-names> </name><name name-style="western"><surname>Dudani</surname><given-names>P</given-names> </name><name name-style="western"><surname>Gupta</surname><given-names>S</given-names> </name><name name-style="western"><surname>Rangarajan</surname><given-names>K</given-names> </name></person-group><article-title>Data privacy in healthcare: in the era of artificial intelligence</article-title><source>Indian Dermatol Online J</source><year>2023</year><volume>14</volume><issue>6</issue><fpage>788</fpage><lpage>792</lpage><pub-id pub-id-type="doi">10.4103/idoj.idoj_543_23</pub-id><pub-id pub-id-type="medline">38099022</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Curzon</surname><given-names>J</given-names> </name><name name-style="western"><surname>Kosa</surname><given-names>TA</given-names> </name><name name-style="western"><surname>Akalu</surname><given-names>R</given-names> </name><name name-style="western"><surname>El-Khatib</surname><given-names>K</given-names> </name></person-group><article-title>Privacy and artificial intelligence</article-title><source>IEEE Trans Artif Intell</source><year>2021</year><volume>2</volume><issue>2</issue><fpage>96</fpage><lpage>108</lpage><pub-id pub-id-type="doi">10.1109/TAI.2021.3088084</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Andreotta</surname><given-names>AJ</given-names> </name><name name-style="western"><surname>Kirkham</surname><given-names>N</given-names> </name><name name-style="western"><surname>Rizzi</surname><given-names>M</given-names> </name></person-group><article-title>AI, big data, and the future of 
consent</article-title><source>AI Soc</source><year>2022</year><volume>37</volume><issue>4</issue><fpage>1715</fpage><lpage>1728</lpage><pub-id pub-id-type="doi">10.1007/s00146-021-01262-5</pub-id><pub-id pub-id-type="medline">34483498</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Adapa</surname><given-names>VR</given-names> </name></person-group><article-title>Navigating the privacy paradox: balancing AI advancement and data protection in the digital age</article-title><source>Int J Sci Res Comput Sci Eng Inf Technol</source><year>2024</year><volume>10</volume><issue>6</issue><fpage>99</fpage><lpage>110</lpage><pub-id pub-id-type="doi">10.32628/CSEIT24106158</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Stanford</surname><given-names>V</given-names> </name><name name-style="western"><surname>Gresh</surname><given-names>L</given-names> </name><name name-style="western"><surname>Toledo</surname><given-names>J</given-names> </name><name name-style="western"><surname>M&#x00E9;ndez</surname><given-names>J</given-names> </name><name name-style="western"><surname>Aldighieri</surname><given-names>S</given-names> </name><name name-style="western"><surname>Reveiz</surname><given-names>L</given-names> </name></person-group><article-title>Evidence in decision-making in the context of COVID-19 in Latin America</article-title><source>Lancet Reg Health Am</source><year>2022</year><month>10</month><volume>14</volume><fpage>100322</fpage><pub-id pub-id-type="doi">10.1016/j.lana.2022.100322</pub-id><pub-id pub-id-type="medline">35879980</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Hu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Jacob</surname><given-names>J</given-names> </name><name name-style="western"><surname>Parker</surname><given-names>GJM</given-names> </name><name name-style="western"><surname>Hawkes</surname><given-names>DJ</given-names> </name><name name-style="western"><surname>Hurst</surname><given-names>JR</given-names> </name><name name-style="western"><surname>Stoyanov</surname><given-names>D</given-names> </name></person-group><article-title>The challenges of deploying artificial intelligence models in a rapidly evolving pandemic</article-title><source>Nat Mach Intell</source><year>2020</year><volume>2</volume><issue>6</issue><fpage>298</fpage><lpage>300</lpage><pub-id pub-id-type="doi">10.1038/s42256-020-0185-2</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Medaglia</surname><given-names>R</given-names> </name><name name-style="western"><surname>Zheng</surname><given-names>L</given-names> </name></person-group><article-title>Mapping government social media research and moving it forward: a framework and a research agenda</article-title><source>Gov Inf Q</source><year>2017</year><month>09</month><volume>34</volume><issue>3</issue><fpage>496</fpage><lpage>510</lpage><pub-id pub-id-type="doi">10.1016/j.giq.2017.06.001</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ding</surname><given-names>X</given-names> </name><name name-style="western"><surname>Shang</surname><given-names>B</given-names> </name><name name-style="western"><surname>Xie</surname><given-names>C</given-names> </name><name name-style="western"><surname>Xin</surname><given-names>J</given-names> </name><name 
name-style="western"><surname>Yu</surname><given-names>F</given-names> </name></person-group><article-title>Artificial intelligence in the COVID-19 pandemic: balancing benefits and ethical challenges in China&#x2019;s response</article-title><source>Humanit Soc Sci Commun</source><year>2025</year><volume>12</volume><issue>1</issue><fpage>1</fpage><lpage>19</lpage><pub-id pub-id-type="doi">10.1057/s41599-025-04564-x</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McAndrew</surname><given-names>T</given-names> </name><name name-style="western"><surname>Gibson</surname><given-names>GC</given-names> </name><name name-style="western"><surname>Braun</surname><given-names>D</given-names> </name><name name-style="western"><surname>Srivastava</surname><given-names>A</given-names> </name><name name-style="western"><surname>Brown</surname><given-names>K</given-names> </name></person-group><article-title>Chimeric Forecasting: an experiment to leverage human judgment to improve forecasts of infectious disease using simulated surveillance data</article-title><source>Epidemics</source><year>2024</year><month>06</month><volume>47</volume><fpage>100756</fpage><pub-id pub-id-type="doi">10.1016/j.epidem.2024.100756</pub-id><pub-id pub-id-type="medline">38452456</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Roy</surname><given-names>K</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Gaur</surname><given-names>M</given-names> </name><name name-style="western"><surname>Sheth</surname><given-names>A</given-names> </name></person-group><article-title>Knowledge infused policy gradients for adaptive pandemic 
control</article-title><source>arXiv</source><access-date>2025-04-24</access-date><comment>Preprint posted online on 2021</comment><pub-id pub-id-type="doi">10.48550/arXiv.2102.06245</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Hooshidary</surname><given-names>S</given-names> </name><name name-style="western"><surname>Canada</surname><given-names>C</given-names> </name><name name-style="western"><surname>Clark</surname><given-names>W</given-names> </name></person-group><article-title>Artificial intelligence in government: the federal and state landscape</article-title><source>NCSL</source><access-date>2025-04-24</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.ncsl.org/technology-and-communication/artificial-intelligence-in-government-the-federal-and-state-landscape">https://www.ncsl.org/technology-and-communication/artificial-intelligence-in-government-the-federal-and-state-landscape</ext-link></comment></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Campbell</surname><given-names>EA</given-names> </name><name name-style="western"><surname>Holl</surname><given-names>F</given-names> </name><name name-style="western"><surname>Marwah</surname><given-names>HK</given-names> </name><name name-style="western"><surname>Fraser</surname><given-names>HS</given-names> </name><name name-style="western"><surname>Craig</surname><given-names>SS</given-names> </name></person-group><article-title>The impact of climate change on vulnerable populations in pediatrics: opportunities for AI, digital health, and beyond&#x2014;a scoping review and selected case studies</article-title><source>Pediatr 
Res</source><year>2025</year><month>10</month><volume>98</volume><issue>4</issue><fpage>1250</fpage><lpage>1256</lpage><pub-id pub-id-type="doi">10.1038/s41390-024-03719-x</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="web"><article-title>United States White House</article-title><source>Removing barriers to American leadership in artificial intelligence</source><year>2025</year><access-date>2025-04-24</access-date><publisher-name>White House</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://www.whitehouse.gov/presidential-actions/2025/01/removing-barriers-to-american-leadership-in-artificial-intelligence">https://www.whitehouse.gov/presidential-actions/2025/01/removing-barriers-to-american-leadership-in-artificial-intelligence</ext-link></comment></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="web"><article-title>United States White House</article-title><source>White House releases new policies on federal agency AI use and procurement</source><access-date>2025-04-24</access-date><publisher-name>White House</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://www.whitehouse.gov/articles/2025/04/white-house-releases-new-policies-on-federal-agency-ai-use-and-procurement/">https://www.whitehouse.gov/articles/2025/04/white-house-releases-new-policies-on-federal-agency-ai-use-and-procurement/</ext-link></comment></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="web"><article-title>United States White House</article-title><source>Preventing Woke AI in the Federal Government</source><year>2025</year><access-date>2026-02-09</access-date><publisher-name>White House</publisher-name><comment><ext-link ext-link-type="uri" 
xlink:href="https://www.whitehouse.gov/presidential-actions/2025/07/preventing-woke-ai-in-the-federal-government">https://www.whitehouse.gov/presidential-actions/2025/07/preventing-woke-ai-in-the-federal-government</ext-link></comment></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="web"><article-title>United States White House</article-title><source>Ensuring a National Policy Framework for Artificial Intelligence</source><year>2025</year><access-date>2026-02-09</access-date><publisher-name>White House</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://www.whitehouse.gov/presidential-actions/2025/12/eliminating-state-law-obstruction-of-national-artificial-intelligence-policy">https://www.whitehouse.gov/presidential-actions/2025/12/eliminating-state-law-obstruction-of-national-artificial-intelligence-policy</ext-link></comment></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="web"><article-title>National Conference of State Legislatures</article-title><source>Artificial Intelligence 2025 Legislation</source><year>2025</year><access-date>2026-02-09</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.ncsl.org/technology-and-communication/artificial-intelligence-2025-legislation">https://www.ncsl.org/technology-and-communication/artificial-intelligence-2025-legislation</ext-link></comment></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Deaton</surname><given-names>A</given-names> </name></person-group><source>The Great Escape: Health, wealth, and the origins of inequality</source><year>2013</year><access-date>2026-02-06</access-date><publisher-name>Princeton University Press</publisher-name><comment><ext-link ext-link-type="uri" 
xlink:href="https://press.princeton.edu/books/hardcover/9780691153544/the-great-escape?srsltid=AfmBOooNBA9EvNKVTtDSApNys3_pCqq1V_CWoZvBVv_EmEdpuCfBMXtu">https://press.princeton.edu/books/hardcover/9780691153544/the-great-escape?srsltid=AfmBOooNBA9EvNKVTtDSApNys3_pCqq1V_CWoZvBVv_EmEdpuCfBMXtu</ext-link></comment></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Filmer</surname><given-names>D</given-names> </name><name name-style="western"><surname>Pritchett</surname><given-names>L</given-names> </name></person-group><article-title>The impact of public spending on health: does money matter?</article-title><source>Soc Sci Med</source><year>1999</year><month>11</month><volume>49</volume><issue>10</issue><fpage>1309</fpage><lpage>1323</lpage><pub-id pub-id-type="doi">10.1016/S0277-9536(99)00150-1</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wagstaff</surname><given-names>A</given-names> </name></person-group><article-title>Poverty and health sector inequalities</article-title><source>Bull World Health Organ</source><year>2002</year><volume>80</volume><issue>2</issue><fpage>97</fpage><lpage>105</lpage><pub-id pub-id-type="medline">11953787</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Holmberg</surname><given-names>S</given-names> </name><name name-style="western"><surname>Rothstein</surname><given-names>B</given-names> </name></person-group><article-title>Dying of corruption</article-title><source>Health Econ Policy Law</source><year>2011</year><month>10</month><volume>6</volume><issue>4</issue><fpage>529</fpage><lpage>547</lpage><pub-id 
pub-id-type="doi">10.1017/S174413311000023X</pub-id><pub-id pub-id-type="medline">20809992</pub-id></nlm-citation></ref></ref-list></back></article>