<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Public Health Surveill</journal-id><journal-id journal-id-type="publisher-id">publichealth</journal-id><journal-id journal-id-type="index">9</journal-id><journal-title>JMIR Public Health and Surveillance</journal-title><abbrev-journal-title>JMIR Public Health Surveill</abbrev-journal-title><issn pub-type="epub">2369-2960</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v11i1e68952</article-id><article-id pub-id-type="doi">10.2196/68952</article-id><article-categories><subj-group subj-group-type="heading"><subject>Viewpoint</subject></subj-group></article-categories><title-group><article-title>Machine Learning Applications in Population and Public Health: Guidelines for Development, Testing, and Implementation</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Pinto</surname><given-names>Andrew D</given-names></name><degrees>MSc, MD, CCFP</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Birdi</surname><given-names>Sharon</given-names></name><degrees>MBT</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Durant</surname><given-names>Steve</given-names></name><degrees>PhD</degrees><xref ref-type="aff" 
rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Rabet</surname><given-names>Roxana</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Parekh</surname><given-names>Rahul</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff5">5</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Ali</surname><given-names>Shehzad</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff6">6</xref><xref ref-type="aff" rid="aff7">7</xref><xref ref-type="aff" rid="aff8">8</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Buckeridge</surname><given-names>David</given-names></name><degrees>MSc, MD, PhD</degrees><xref ref-type="aff" rid="aff9">9</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Ghassemi</surname><given-names>Marzyeh</given-names></name><degrees>MSc, PhD</degrees><xref ref-type="aff" rid="aff10">10</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Gibson</surname><given-names>Jennifer</given-names></name><degrees>MA, PhD</degrees><xref ref-type="aff" rid="aff11">11</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>John-Baptiste</surname><given-names>Ava</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff12">12</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Macklin</surname><given-names>Jillian</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff13">13</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>McCradden</surname><given-names>Melissa D</given-names></name><degrees>MHSc, PhD</degrees><xref ref-type="aff" rid="aff4">4</xref><xref 
ref-type="aff" rid="aff14">14</xref><xref ref-type="aff" rid="aff15">15</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>McKenzie</surname><given-names>Kwame</given-names></name><degrees>MBBS</degrees><xref ref-type="aff" rid="aff16">16</xref><xref ref-type="aff" rid="aff17">17</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Naraei</surname><given-names>Parisa</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff18">18</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Owusu-Bempah</surname><given-names>Akwasi</given-names></name><degrees>MA, PhD</degrees><xref ref-type="aff" rid="aff19">19</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Rosella</surname><given-names>Laura C</given-names></name><degrees>MHSc, PhD</degrees><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="aff" rid="aff20">20</xref><xref ref-type="aff" rid="aff21">21</xref><xref ref-type="aff" rid="aff22">22</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Shaw</surname><given-names>James</given-names></name><degrees>MPT, PhD</degrees><xref ref-type="aff" rid="aff23">23</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Upshur</surname><given-names>Ross</given-names></name><degrees>MA, MSc, MD, CCFP</degrees><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="aff" rid="aff11">11</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Mishra</surname><given-names>Sharmistha</given-names></name><degrees>MSc, MD, PhD</degrees><xref ref-type="aff" rid="aff24">24</xref><xref ref-type="aff" rid="aff25">25</xref><xref ref-type="aff" rid="aff26">26</xref><xref ref-type="aff" rid="aff27">27</xref><xref ref-type="aff" rid="aff28">28</xref></contrib></contrib-group><aff id="aff1"><institution>Upstream Lab, MAP Centre 
for Urban Health Solutions, Li Ka Shing Knowledge Institute, Unity Health Toronto</institution><addr-line>30 Bond Street</addr-line><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff2"><institution>Department of Family and Community Medicine, St. Michael&#x2019;s Hospital</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff3"><institution>Department of Family and Community Medicine, Temerty Faculty of Medicine, University of Toronto</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff4"><institution>Division of Clinical Public Health, Dalla Lana School of Public Health, University of Toronto</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff5"><institution>Department of Family Medicine, College of Medicine, University of Saskatchewan</institution><addr-line>Saskatoon</addr-line><addr-line>SK</addr-line><country>Canada</country></aff><aff id="aff6"><institution>Department of Epidemiology and Biostatistics, Western Centre for Public Health &#x0026; Family Medicine, Western University</institution><addr-line>London</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff7"><institution>WHO Collaborating Centre for Knowledge Translation and Health Technology Assessment in Health Equity</institution><addr-line>Ottawa</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff8"><institution>Department of Health Sciences, University of York</institution><addr-line>York</addr-line><country>United Kingdom</country></aff><aff id="aff9"><institution>Department of Epidemiology, Biostatistics and Occupational Health, School of Population and Global Health, McGill University</institution><addr-line>Montreal</addr-line><addr-line>QC</addr-line><country>Canada</country></aff><aff id="aff10"><institution>Department 
of Electrical Engineering and Computer Science (EECS) and Institute for Medical Engineering and Science (IMES), Massachusetts Institute of Technology</institution><addr-line>Cambridge</addr-line><addr-line>MA</addr-line><country>United States</country></aff><aff id="aff11"><institution>Joint Centre for Bioethics, University of Toronto</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff12"><institution>Departments of Epidemiology and Biostatistics, Anesthesia and Perioperative Medicine, and Schulich Interfaculty Program in Public Health, Western University</institution><addr-line>London</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff13"><institution>Undergraduate Medical Education, Temerty Faculty of Medicine, University of Toronto</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff14"><institution>Department of Bioethics, Hospital for Sick Children</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff15"><institution>Australian Institute for Machine Learning, University of Adelaide</institution><addr-line>Adelaide</addr-line><country>Australia</country></aff><aff id="aff16"><institution>Wellesley Institute</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff17"><institution>Centre for Addiction and Mental Health</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff18"><institution>Department of Computer Science, Toronto Metropolitan University</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff19"><institution>Department of Sociology, Faculty of Arts and Sciences, University of 
Toronto</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff20"><institution>Institute for Better Health, Trillium Health Partners</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff21"><institution>Department of Laboratory Medicine and Pathobiology, Temerty Faculty of Medicine</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff22"><institution>Division of Epidemiology, Dalla Lana School of Public Health, University of Toronto</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff23"><institution>Department of Physical Therapy, Temerty Faculty of Medicine, University of Toronto</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff24"><institution>Division of Infectious Diseases, Temerty Faculty of Medicine, University of Toronto</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff25"><institution>MAP Centre for Urban Health Solutions, Li Ka Shing Knowledge Institute, Unity Health Toronto</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff26"><institution>Management and Evaluation, and Division of Epidemiology, Institute of Health Policy, Dalla Lana School of Public Health, University of Toronto</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff27"><institution>Institute of Medical Science, Faculty of Medicine, University of Toronto</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff28"><institution>Institute for Clinical Evaluative 
Sciences</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Neto</surname><given-names>Onicio Leal</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Chuang</surname><given-names>Elizabeth</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Ladhania</surname><given-names>Rahul</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Oworah</surname><given-names>Sunday</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Andrew D Pinto, MSc, MD, CCFP, Upstream Lab, MAP Centre for Urban Health Solutions, Li Ka Shing Knowledge Institute, Unity Health Toronto, 30 Bond Street, Toronto, ON, M5B 1W8, Canada, 1 4168646060 ext 76148; <email>andrew.pinto@utoronto.ca</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>24</day><month>10</month><year>2025</year></pub-date><volume>11</volume><elocation-id>e68952</elocation-id><history><date date-type="received"><day>18</day><month>11</month><year>2024</year></date><date date-type="rev-recd"><day>06</day><month>06</month><year>2025</year></date><date date-type="accepted"><day>06</day><month>06</month><year>2025</year></date></history><copyright-statement>&#x00A9; Andrew D Pinto, Sharon Birdi, Steve Durant, Roxana Rabet, Rahul Parekh, Shehzad Ali, David Buckeridge, Marzyeh Ghassemi, Jennifer Gibson, Ava John-Baptiste, Jillian Macklin, Melissa D McCradden, Kwame McKenzie, Parisa Naraei, Akwasi Owusu-Bempah, Laura C Rosella, James Shaw, Ross Upshur, Sharmistha Mishra. 
Originally published in JMIR Public Health and Surveillance (<ext-link ext-link-type="uri" xlink:href="https://publichealth.jmir.org">https://publichealth.jmir.org</ext-link>), 24.10.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Public Health and Surveillance, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://publichealth.jmir.org">https://publichealth.jmir.org</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://publichealth.jmir.org/2025/1/e68952"/><abstract><p>Machine learning (ML), a subset of artificial intelligence, uses large datasets to identify patterns between potential predictors and outcomes. ML involves iterative learning from data and is increasingly used in population and public health. Examples include early warning of infectious disease outbreaks, predicting the future burden of noncommunicable diseases, and assessing public health interventions. However, ML can inadvertently produce biased outputs related to the quality and quantity of data, who is engaged and helping direct the analysis, and how findings are interpreted. Specific guidelines for using ML in population and public health have not yet been created. 
We assembled a diverse team of experts in computer science, statistical modeling, clinical and population health epidemiology, health economics, ethics, sociology, and public health. Drawing on literature reviews and a modified Delphi process, we identified five key recommendations: (1) prioritize partnerships and interventions to support communities considered structurally disadvantaged; (2) use ML for dynamic situations, such as public health emergencies, while adhering to ethical standards; (3) conduct risk assessments and bias mitigation strategies aligned with identified risks; (4) ensure technical transparency and reproducibility by publicly sharing data sources and methodologies; and (5) foster multidisciplinary dialogue to discuss the potential harms of ML-related bias and raise awareness among the public and public health community. The proposed guidelines provide operational steps for stakeholders, ensuring that ML tools are not only effective but also ethically grounded and feasible in real-world scenarios.</p></abstract><kwd-group><kwd>population health</kwd><kwd>public health</kwd><kwd>machine learning</kwd><kwd>artificial intelligence</kwd><kwd>guideline</kwd><kwd>algorithmic bias</kwd><kwd>AI</kwd><kwd>health equity</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Machine learning (ML) is a form of artificial intelligence (AI) that is now used for a range of problems across many fields. ML involves a machine &#x201C;learning&#x201D; as it processes more data, improving predictive performance over time [<xref ref-type="bibr" rid="ref1">1</xref>]. ML, as a set of tools, can be used for prediction, clustering, and causal inference. 
While prediction models are commonly used in public health to examine associations between potential predictors and outcomes, ML can also be used to identify groups with shared characteristics (clustering) and to identify potential causal associations between interventions and health outcomes [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref3">3</xref>]. However, these methods are often limited by the quality and quantity of the available data in public health [<xref ref-type="bibr" rid="ref4">4</xref>].</p><p>As methods advance and as the availability of data increases, ML-based innovations are playing a central role in population and public health [<xref ref-type="bibr" rid="ref5">5</xref>]. Key areas include the surveillance of infectious diseases [<xref ref-type="bibr" rid="ref6">6</xref>]; predicting the burden of noncommunicable diseases (NCDs) [<xref ref-type="bibr" rid="ref7">7</xref>]; and assessing public health interventions, including those focused on modifiable risk factors [<xref ref-type="bibr" rid="ref8">8</xref>]. Awareness that ML could help improve population health is tempered by a growing understanding that it has risks and potentially negative consequences [<xref ref-type="bibr" rid="ref9">9</xref>]. An important concern for the application of ML models is the perpetuation and amplification of biases reflecting patterns of societal inequality, when the accuracy of model predictions differs systematically across subpopulations, potentially leading to decisions that exacerbate health inequities [<xref ref-type="bibr" rid="ref10">10</xref>]. This issue is rooted in differences in the amount and quality of data from different populations [<xref ref-type="bibr" rid="ref11">11</xref>].</p><p>Bias in ML as it is applied to population and public health problems has been identified previously [<xref ref-type="bibr" rid="ref12">12</xref>]; however, guidelines for ML applied to population and public health are limited. 
Without such guidelines, ML could exacerbate inequities and fail to adhere to methodological standards. For example, during the COVID-19 pandemic, ML models were used to predict infection rates and allocate health care resources. These models were found to be biased, particularly when data from underrepresented communities were either scarce or of lower quality, impacting the accuracy of predictions [<xref ref-type="bibr" rid="ref13">13</xref>]. Public health organizations may lack the resources and expertise to thoroughly evaluate the ML models they use. While ethical frameworks for AI in health have emerged, they primarily focus on clinical settings. Public health involves unique applications, including population-level interventions, real-time surveillance, and communicable disease control, which require field-specific guidance. Our guidelines aim to address this gap by focusing on the distinct operational, ethical, and equity considerations involved in applying ML to public health. We aim to provide recommendations to those creating or revising ML tools for population and public health purposes, offer guidance to users of ML tools, and outline approaches on how to address bias. We centered our recommendations on the following questions: (1) What are best practices to identify and mitigate biases for those developing, testing, and implementing ML models in population health? (2) What are the priority areas for further research?</p><p>We used the GRADE (Grading of Recommendations Assessment, Development and Evaluation) [<xref ref-type="bibr" rid="ref14">14</xref>] and the National Institute for Health and Care Excellence (NICE) [<xref ref-type="bibr" rid="ref15">15</xref>] approaches to guideline development, and other guidelines specific to epidemiology and health economics modeling [<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref17">17</xref>].
We also drew insights from participatory frameworks for policy development governing ML [<xref ref-type="bibr" rid="ref18">18</xref>] and from documentation on the governance of community data trusts, which provide data for many of the ML models we studied [<xref ref-type="bibr" rid="ref19">19</xref>]. In addition, we considered international guidelines on AI ethics, such as the Montreal Declaration for Responsible Development of AI [<xref ref-type="bibr" rid="ref20">20</xref>] and recommendations from the European Commission High-Level Expert Group on AI [<xref ref-type="bibr" rid="ref21">21</xref>], along with the World Health Organization guidance on <italic>Ethics &#x0026; Governance of Artificial Intelligence for Health</italic> [<xref ref-type="bibr" rid="ref22">22</xref>].</p></sec><sec id="s2"><title>Guideline Panel Composition and Management of Competing Interests</title><p>To identify experts for the Delphi process, we consulted with leading researchers and practitioners in the fields of ML, public health, and ethics. We also searched online databases and professional networks to identify potential experts. We aimed to include experts from diverse backgrounds, including academia, industry, and government, with 17 chosen to be a part of the guideline panel (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). Our guideline panel includes academics in computer science, statistical modeling, epidemiology (clinical epidemiology, social epidemiology, infectious disease, and population health), health economics, ethics, sociology, primary care, public health, and the social determinants of health. Each member has contributed their disciplinary expertise and perspective regarding the social constructions influencing data, bias, and bias mitigation (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). 
Our panel members also bring diverse experiential knowledge, aligning with our guidelines&#x2019; emphasis on recognizing the importance of lived experience in equitable policy development. Panel members did not report any direct conflicts of interest. To ensure transparency, members also agreed to disclose any less direct or potential competing interests, but no such conflicts were found.</p></sec><sec id="s3"><title>Review of the Literature</title><p>We conducted a series of literature searches to retrieve existing guidelines related to population and public health, focusing on their intersections with equity, technical aspects, and knowledge mobilization. We searched a range of sources, including original research, reviews, commentaries, editorials, and gray literature (eg, government reports, policy documents, and organizational websites). In the course of this work, we drew on common elements found in other guidelines addressing bias in ML models [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref24">24</xref>]. These included practices such as engaging with communities impacted by biased models, prioritizing diversity in the teams developing and implementing models, considering which groups considered marginalized could benefit most from involvement in population and public health ML model development, ensuring robustness in the identification of groups considered vulnerable in data, systematically evaluating and reporting on bias, and conducting post-implementation evaluations to continue mitigating bias. In parallel, we conducted and reported three scoping reviews on ML applications in population and public health, specifically examining their use in addressing risk factors, NCDs, and communicable diseases. 
Two of these reviews have been published, providing an empirical foundation for our guidelines [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref26">26</xref>]. The first review found that bias mitigation was rarely addressed in ML studies focused on population health, particularly in the context of NCDs, with most efforts limited to addressing sex-related bias [<xref ref-type="bibr" rid="ref25">25</xref>]. The second review, which examined ML applications for addressing risk factors for NCDs, found that although nine of the 20 studies mentioned algorithmic bias, these discussions were generally superficial and limited to traditional biases such as recall or misclassification [<xref ref-type="bibr" rid="ref26">26</xref>]. To date, few systematic reviews have examined bias in ML applications for public health. Our findings are supported by another systematic review on the use of AI and ML in disaster response and public health emergencies, which similarly noted a lack of bias mitigation strategies despite the growing use of ML in these settings [<xref ref-type="bibr" rid="ref27">27</xref>]. Our guidelines address this gap by offering operational recommendations that emphasize transparency, stakeholder engagement, and context-specific risk assessment [<xref ref-type="bibr" rid="ref25">25</xref>]. Finally, we shared our evidence synthesis with the guideline committee alongside our draft recommendations.</p></sec><sec id="s4"><title>Topic Selection and Development of Recommendations</title><p>We used a modified Delphi approach to develop our recommendations, consisting of two iterative rounds.
An initial list of relevant topics for ML applications in population and public health was compiled by SD and SB, based on a review of gray literature, academic publications, current practice reports, and consultations with the study team.</p><p>In round 1, this list was refined during three virtual workshops held on November 24, 2022, December 9, 2022, and December 13, 2022. These sessions focused on ethics and equity, technical considerations, and knowledge mobilization. During the workshops, participants assessed draft recommendations using a modified Likert scale and provided written feedback. SB and SD analyzed the quantitative scores and qualitative input to identify consensus and areas of divergence.</p><p>In round 2, we circulated the revised draft recommendations to the panel in advance of a follow-up virtual meeting. Panel members discussed the content and submitted additional written feedback. We incorporated these inputs to finalize the recommendations, drawing on frameworks such as GRADE and other relevant guideline development methodologies.</p><p>The final recommendations were organized into five thematic categories: (1) equity, diversity, and inclusion; (2) public health emergencies, deidentified population data use, and consent; (3) due diligence during model conceptualization and early development; (4) technical transparency, consistency, and data management; and (5) knowledge mobilization to the public, ML experts, and public health professionals.</p></sec><sec id="s5"><title>Recommendations</title><sec id="s5-1"><title>Recommendation 1: Prioritize Partnerships and Interventions That Support Communities That Are Disadvantaged by Social and Economic Policies</title><p>This includes activities such as algorithmic bias mitigation, capacity building, and fostering equitable representation. 
Partnerships should not only engage diverse collaborators with expertise in ethical AI and public health but also invest in building local capacity to support sustainable and responsible ML deployment in diverse settings.</p><p>ML models may be prone to various types of bias, which can significantly impact the health and well-being of communities made vulnerable by social and economic policies. ML approaches that rely on data from internet searches or social media may lack representativeness, tending to exhibit a bias toward processing data from individuals with higher socioeconomic status, from certain age groups, or those living in urban areas rather than remote or rural areas [<xref ref-type="bibr" rid="ref6">6</xref>]. Similarly, ML models used in population health often encounter missing or nonrepresentative data when relying on electronic medical records or public health data, which may not fully capture the diversity of the communities they aim to serve [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>].</p><p>Members of diverse community groups, including communities disadvantaged by social and economic policies, bring a variety of perspectives and lived experiences to the table. These perspectives and insights must be actively prioritized in decision-making to identify and mitigate algorithm-related harms effectively [<xref ref-type="bibr" rid="ref30">30</xref>]. Such experiences are crucial for meaningful research and transformative change within institutions. Expert consultation, while valuable, should not replace the direct involvement of people with lived experience; instead, it should complement it [<xref ref-type="bibr" rid="ref31">31</xref>]. 
Unfortunately, those who stand to benefit most from fair and responsible ML applications are also those most at risk of algorithmic harm due to systemic biases and exclusion [<xref ref-type="bibr" rid="ref32">32</xref>].</p><p>Prioritizing equity in ML requires balancing ethical considerations with technical feasibility and real-world implementation challenges. While transparency is critical for trust and accountability, full transparency can be resource intensive and difficult to achieve in low- and middle-income countries (LMICs), where data infrastructure and regulatory oversight may be limited. Similarly, while equity-driven approaches aim to reduce algorithmic harms, they require sustained investment in local expertise, which may not always be immediately available. Feasibility must therefore be considered at each stage, ensuring that solutions are both ethically sound and realistically implementable in diverse public health settings.</p><p>In LMICs, where these guidelines are particularly relevant, implementing ML partnerships and equity-focused interventions is often constrained by financial limitations, workforce shortages, and fragmented health data systems. Ensuring that ML applications promote equity while remaining feasible requires balancing ethical considerations with practical constraints. Many public health institutions in LMICs operate with competing priorities, making it challenging to allocate resources solely toward algorithmic transparency, bias mitigation, or extensive auditing processes. Policymakers, health care organizations, and ML developers in these settings must adopt scalable, context-aware approaches that allow for phased implementation. Resource-limited settings can start with standard approaches to data definition and extraction, streamlined documentation, internal monitoring, and bias assessments targeted at high-risk applications where disparities are most pronounced. 
Furthermore, the literature from LMICs emphasizes the need for tailored implementation approaches. In Africa, for example, ongoing discussions have identified key priorities such as strengthening electricity and internet infrastructure [<xref ref-type="bibr" rid="ref33">33</xref>], expanding the data science workforce through increased educational opportunities [<xref ref-type="bibr" rid="ref33">33</xref>], leveraging smartphone-based AI apps [<xref ref-type="bibr" rid="ref33">33</xref>], and developing AI frameworks that reflect regional needs [<xref ref-type="bibr" rid="ref34">34</xref>]. In addition, fostering multisectoral partnerships and policy initiatives has been recommended as a way to incentivize and support responsible AI adoption [<xref ref-type="bibr" rid="ref35">35</xref>].</p><p>The trade-offs between transparency, equity, and operational feasibility must also be carefully managed. While transparency fosters trust and accountability, excessive documentation requirements or mandatory external reviews may slow the deployment of ML-driven health interventions, particularly in rapidly evolving crises such as infectious disease outbreaks. In such cases, prioritizing community engagement, establishing advisory panels with representatives from populations considered marginalized, and implementing practical bias mitigation strategies such as integrating socioeconomic variables into model adjustments can ensure ethical safeguards while maintaining efficiency. Flexible regulatory frameworks that accommodate local resource constraints can allow LMICs to adopt ML responsibly without overwhelming already burdened health care infrastructures.</p><p>Partnering directly with communities can help address these gaps. This includes building trust, capacity building, and advancing representation [<xref ref-type="bibr" rid="ref32">32</xref>]. Bias in ML models can stem from various sources, including data collection, label selection, and feature inclusion. 
Stakeholders from communities considered disadvantaged should be engaged not only to address data biases but also to guide decisions on selecting fair labels and features. For example, using health care expenditure as a proxy for need can introduce bias if spending patterns differ across communities due to economic disparities. Direct involvement of community members can also help identify potential biases in data sources and variables, as well as develop more equitable models. For instance, in the development of predictive models for health care access, community insights can reveal overlooked barriers, such as transport challenges or financial constraints. To mitigate this, ML users should prioritize diverse and representative datasets, monitor error rates and performance levels across different patient groups, and consider the downstream implications (eg, potential impacts on health care access and quality of public health interventions) [<xref ref-type="bibr" rid="ref32">32</xref>].</p><p>True partnership goes beyond consultation; it requires ongoing collaboration and investment in local expertise. Many LMICs and resource-constrained settings lack appropriate safeguards, technical expertise, and infrastructure to implement ML tools effectively. Addressing these gaps requires more than just ethical guidelines. It demands direct investment in skills, resources, and institutional support. Practical strategies to foster capacity building include mentorship programs that connect ML experts in high-income countries with local researchers and policymakers to support training and facilitate knowledge exchange. For example, codeveloping open-access educational resources tailored to public health professionals can help increase AI literacy. Sharing technical resources, such as code and cloud-based computing capacity, can support implementation, provided these efforts are led by LMIC stakeholders. 
Additional strategies include investing in affordable and scalable ML tools designed for LMICs to ensure usability even with limited infrastructure and fostering partnerships between public health institutions, universities, and community organizations to bridge knowledge gaps. By prioritizing these efforts, ML tools can be developed not only with but also by the communities they aim to serve.</p><p>Ultimately, capacity building is a long-term commitment that requires sustained funding, institutional support, and local leadership. While external collaborations provide valuable expertise, true sustainability depends on empowering local communities to take ownership of ML initiatives. This ensures that ML applications in public health are not only effective and transparent but also grounded in the realities and needs of the populations they serve [<xref ref-type="bibr" rid="ref36">36</xref>].</p></sec><sec id="s5-2"><title>Recommendation 2: Use ML in Public Health Emergencies and Other Dynamic, Fast-Paced Situations by Collecting, Analyzing, and Using Population-Wide Deidentified Data</title><p>ML can be useful in public health emergencies and other dynamic, fast-paced situations by collecting, analyzing, and using population-wide deidentified data. This process of information gathering and manipulation of deidentified data can be carried out without consent, provided that ethical safeguards are in place and risks related to privacy, misuse, and transparency are actively mitigated.</p><p>ML plays a crucial role in gathering, reporting, and analyzing information rapidly, which is vital for mitigating further health damage in dynamic, fast-paced situations such as public health emergencies. 
According to the United Nations&#x2019; Sendai framework, reducing disaster risk involves understanding disaster risk, strengthening governance, improving preparedness for an effective response, and allocating resources toward measures that can enhance resilience [<xref ref-type="bibr" rid="ref37">37</xref>]. Prediction models and protocols, such as evacuation planning, have been used to alleviate the adverse effects of such emergencies. ML has greatly enhanced the ability to monitor information and make timely decisions during emergencies. This has enabled disease outbreak prediction, improved evacuation planning, and optimized the distribution of resources to areas in need [<xref ref-type="bibr" rid="ref27">27</xref>].</p><p>During a public health emergency, the rapid collection of deidentified data without consent may be necessary to ensure timely responses. However, this urgency must be balanced with the potential risks of data misuse, privacy breaches, and lack of accountability, particularly in regions with weaker institutional safeguards. In LMICs and other resource-constrained settings, insufficient regulatory oversight may increase the risk of unauthorized access, data exploitation, or reidentification of individuals.</p><p>To minimize these risks while maintaining the efficiency of ML-driven public health responses, organizations and governments should adopt structured ethical frameworks that ensure transparency, accountability, and proportionality in data use. Principles such as FAIR (Findable, Accessible, Interoperable, and Reusable) and CARE (Collective Benefit, Authority to Control, Responsibility, and Ethics) offer structured guidelines for ethical data collection and governance and thus can be used in contexts where regulatory oversight is limited. The FAIR principles focus on making health data widely accessible and reusable, which is essential for rapid public health responses while maintaining data integrity and security. 
The CARE principles, on the other hand, ensure that data collection respects collective benefit and community authority, particularly in Indigenous communities and those considered marginalized, preventing exploitative data use [<xref ref-type="bibr" rid="ref38">38</xref>]. In addition to regulatory measures, capacity-building efforts should prioritize the establishment of independent data governance bodies within LMICs to oversee ethical ML implementation. Encouraging multistakeholder collaborations, including engagement with civil society organizations and the communities affected, can further enhance accountability. By embedding these safeguards, public health responses can remain rapid and effective without compromising ethical considerations, even in regions with limited regulatory infrastructure.</p><p>Similarly, determining whether individuals or populations would be willing to disclose certain information may be complex. Most people and organizations recognize that some degree of data collection without consultation may be necessary. During the COVID-19 pandemic, researchers effectively used deidentified census data to identify high-risk neighborhoods without individual consent [<xref ref-type="bibr" rid="ref39">39</xref>]. This approach can serve as a model for using public health data during emergencies while adhering to ethical standards. Ethical frameworks related to consent, privacy of information, and their implications for population and public health, as well as crisis management, can help evaluate the ethical feasibility of proposed solutions, even if they do not address all aspects of the current situation. 
In their development of an ethical data access framework, Lusignan et al [<xref ref-type="bibr" rid="ref40">40</xref>] outline that structuring collective as well as organizational thinking and decision-making regarding what is and is not appropriate within a crisis early on makes for improved trust between the data providers and recipients, as expectations may be collaboratively set, negotiated, and ideally fulfilled or exceeded. To accelerate the use of deidentified data in public health responses, ethical frameworks should allow faster-access pathways for researchers who can demonstrate secure data handling and infrastructure. This could involve streamlined approvals for projects with proven privacy measures.</p><p>Ideally, this process should occur during the interpandemic phase, using inclusive structures and governance models with oversight powers. This process should determine whether the benefits to society, such as reducing long-term loss of life and health, outweigh the trade-offs involving public interest, privacy breaches, and ethical considerations. To ensure the enforcement of these guidelines in real-world scenarios, particularly where regulatory oversight is lacking, we propose several mechanisms. One approach is to integrate these guidelines into existing public health governance structures, ensuring alignment with established ethical and legal frameworks [<xref ref-type="bibr" rid="ref41">41</xref>]. In addition, public health institutions and regulatory bodies could establish independent review panels to assess compliance with these ML guidelines [<xref ref-type="bibr" rid="ref42">42</xref>]. 
Another enforcement mechanism could be the development of accreditation or certification programs for ML applications in public health, which would require adherence to ethical and transparency standards [<xref ref-type="bibr" rid="ref43">43</xref>].</p></sec><sec id="s5-3"><title>Recommendation 3: Ensure Fairness Across Populations and Mitigate Bias</title><p>The level of bias mitigation should be proportional to the context and potential impact of the model, ensuring that even low-risk applications maintain essential fairness safeguards. All aspects of the model&#x2019;s implementation should be assessed, including inherent, external, or incompletely known factors that could contribute to bias.</p><p>Risk assessment is central to any innovation, but it is especially important for ML, which is prone to certain liabilities [<xref ref-type="bibr" rid="ref44">44</xref>]. The use of algorithms in health care can perpetuate racial biases due to existing disparities in health care delivery. When designing algorithms to predict health outcomes based on genetic findings, bias may arise if there is limited or no research conducted in certain populations. For instance, using data from the Framingham Heart Study to predict cardiovascular risk in non-White populations has led to biased results, overestimating or underestimating the risk [<xref ref-type="bibr" rid="ref45">45</xref>].</p><p>To clarify how risk relates to bias mitigation, risk should be defined based on the potential for harm or inequity, particularly to populations considered vulnerable, rather than solely on model complexity or scope. Risk assessment should consider the model&#x2019;s intended use, the sensitivity of the outcome (eg, health care access or resource allocation), and the potential for systemic harm if biases are present. Importantly, a lower risk designation should never justify a lax approach to bias mitigation. 
Instead, risk levels should guide the <italic>type</italic> of mitigation strategies deployed, with simpler models undergoing appropriate fairness checks and more complex, high-impact models requiring comprehensive audits and subgroup analyses.</p><p>Given the wide range of ML models, conducting situation-specific risk assessments, especially for populations that may be vulnerable due to social or economic policies, is essential. We need to develop protocols to ensure fairness across diverse populations, using metrics such as subgroup performance, false-positive or false-negative rates, and bias-specific checks (eg, checking for overrepresentation or underrepresentation of particular groups). Transparency should extend to data sources, model methodologies, and decisions made during development. Providing accessible documentation, including technical reports and simplified explanations, can help both technical and nontechnical stakeholders understand potential biases and their mitigation [<xref ref-type="bibr" rid="ref32">32</xref>]. Transparency should highlight how different populations are represented in training data and how model outputs vary among them. For example, models predicting health care access should clearly show performance disparities between urban and rural populations.</p><p>Transparency, explainability, and interpretability of ML models are critical for stakeholder trust and decision-making. Transparency refers to how openly the model&#x2019;s design, data sources, and decision-making processes are shared, helping stakeholders understand how the model operates. Efforts should be made to improve the transparency of models, such as through clear explanations of how inputs influence outputs, visual aids, and plain language summaries. Explainability focuses on making specific predictions understandable, clarifying why a model produced a particular output through methods such as feature importance analysis or decision trees. 
Interpretability relates to how easily a human can understand the model&#x2019;s internal logic or decision-making process, which is often linked to model simplicity or the use of interpretable techniques such as linear regression or rule-based systems. Altogether, these concepts ensure that ML models are accessible, trustworthy, and accountable to both technical and nontechnical stakeholders.</p><p>For example, using interpretable models in clinical settings can help physicians understand and validate AI recommendations, enhancing adoption and reliability. ML is sometimes portrayed as a technology requiring advanced training to understand, leading to both hopeful expectations and suspicion. It is important to clearly communicate the inherent and associated risks of the model and define what is meant by ML in its context [<xref ref-type="bibr" rid="ref32">32</xref>]. When ML-related technologies and their risks are explained in plain language, public distrust tends to decrease [<xref ref-type="bibr" rid="ref46">46</xref>].</p></sec><sec id="s5-4"><title>Recommendation 4: Ensure Public Availability of Data Sources, Model Methodologies, and Technical Details, Along With Bias Mitigation Strategies, Used in the Contexts of Models to Promote Transparency, Reproducibility, and Trustworthiness Across ML Studies</title><p>Providing accessible information about the technical aspects of an ML study or solution, such as its data sources, population, characteristics, and model variables, along with detailed descriptions of its methodology and the deidentified datasets used, supports reproducibility, bias mitigation, and trustworthiness [<xref ref-type="bibr" rid="ref32">32</xref>]. However, ensuring public availability of data must be balanced with the sensitivity of health data, privacy regulations, and proprietary constraints. 
To promote meaningful transparency, a structured approach is necessary, one that goes beyond simply sharing code and considers regulatory, ethical, and contextual limitations. Drawing on European Union legislation and literature in computer science, Kiseleva and De Hert [<xref ref-type="bibr" rid="ref47">47</xref>] suggest that transparency must be seen as a fundamental &#x201C;way of thinking&#x201D; and an all-encompassing concept that characterizes the process of developing and using AI.</p><p>Achieving transparency requires balancing openness with the risks of reidentification, legal restrictions, and proprietary interests. While transparency enhances trust in ML models, full public disclosure of health data is not always feasible due to privacy laws such as the Health Insurance Portability and Accountability Act (HIPAA) and General Data Protection Regulation (GDPR), institutional data-sharing policies, and concerns over commercial confidentiality.</p><p>In public health contexts, transparency should function as a system of accountability rather than unrestricted access to all datasets. ML documentation should be structured to ensure usability while maintaining compliance with privacy and proprietary safeguards. This includes providing methodological details and bias mitigation strategies, ensuring that plain language summaries are available for nontechnical stakeholders, and adopting tiered-access models where sensitive data can be reviewed under controlled conditions rather than being made fully public. The need for transparency should also be weighed against the practical barriers to data sharing. Health data often originate from multiple institutions with varying governance policies, making harmonization difficult. In addition, proprietary models developed by industry partners may involve intellectual property protections that restrict full disclosure of methodologies. 
Addressing these challenges requires the development of standardized documentation and data governance agreements that balance the need for public trust with confidentiality concerns.</p><p>To enhance data sharing, transparency, and bias mitigation, dataset development protocols should align with existing frameworks to integrate new and prior information. Data used for model training, validation, or implementation should be of high quality (eg, completeness, source consistency, and linkage potential) and consistently accessible and updated, especially data related to employment, education, occupation, other socioeconomic status factors, and health inequalities [<xref ref-type="bibr" rid="ref48">48</xref>]. In applications of linked data and ML in the health sciences (eg, to estimate population-level health indicators), bias can arise from nonstandard data collection methodologies. Developing standardized protocols for all data sources used in each project and including variance assessment and context-appropriate handling (eg, oversampling, imputation) in bias mitigation strategies are essential. For example, in response to bias concerns during the COVID-19 pandemic, ML models were adjusted to incorporate sociodemographic data to improve accuracy in predicting infection spread among populations considered marginalized [<xref ref-type="bibr" rid="ref49">49</xref>]. Another successful example includes the use of algorithmic audits in health care AI applications, where independent researchers identified and mitigated racial biases in predictive models used for hospital readmission rates [<xref ref-type="bibr" rid="ref50">50</xref>]. Transparency should include public documentation of these bias mitigation strategies while ensuring that sensitive details remain protected.</p><p>Assessing bias and performance across subpopulations is another essential element of responsible transparency. 
For public health surveillance, sensitivity may be prioritized to avoid missing cases, whereas models used for treatment decisions may emphasize specificity to minimize unnecessary interventions. Metrics such as area under the curve, positive predictive value, and user satisfaction should be evaluated across different subpopulations to detect and correct bias. It is equally important to identify variance in datasets, as high variance can exacerbate bias in ML models. Underfitting, which occurs when a model fails to capture important patterns, and overfitting, where a model becomes hypersensitive to minor fluctuations in data, should be addressed through continuous model tuning and hyperparameter optimization.</p><p>In addition to predeployment fairness checks, postdeployment monitoring should include regular performance evaluations across diverse subpopulations to detect bias drift, which can emerge as population characteristics change. Bias drift occurs when the model&#x2019;s predictive performance deteriorates or produces skewed results over time, often reflecting shifts in population demographics or health care access patterns. We also highlight the importance of model update protocols incorporating community feedback loops, participatory audits, and transparent reporting of adjustments to ensure that updates are responsive and equitable.</p><p>We recommend that organizations implement structured, iterative audit cycles to support these processes. These cycles should include predeployment fairness checks using simulated population data and postdeployment ethical audits conducted at regular intervals or following significant model updates. In addition, establishing rapid-response mechanisms to address emerging ethical concerns during deployment can help maintain accountability. 
Incorporating stakeholder feedback from impacted communities through participatory reviews ensures that evaluations remain inclusive and socially responsive.</p></sec><sec id="s5-5"><title>Recommendation 5: Facilitate Regular, Multidisciplinary Discussions Involving ML Developers, Public Health Professionals, Ethicists, and Community Representatives to Identify Biases, Ensure Fair Implementation, and Increase Transparency</title><p>Provide plain language summaries and guidelines that are consistent in terminology to raise awareness among both the public and experts about ML-related bias and debiasing.</p><p>Efforts to raise public awareness about ML and its benefits should be balanced with accurate information about potential harms that may be associated with its implementation. In a study by Musbahi et al [<xref ref-type="bibr" rid="ref51">51</xref>], the views of patients and the public about AI in health care were analyzed. The top 5 concerns included decreased human interaction, data security, obtaining consent for data use, errors in AI systems, and the potential irrelevance of AI in health care. Despite factors promoting the adoption of technology in health care settings, achieving sustainable implementation remains a challenge. Providing information to address individual concerns about the safety and effectiveness of ML models is essential. Public disclosure and scientific interrogation of potentially harmful occurrences or risks associated with ML, including unintentional biases, build trust with the public. Efforts to raise awareness should use accessible language and incorporate research best practices (eg, consistent terminology, structured abstracts, and established reporting guidelines) to make technical concepts easier to understand for a wider audience [<xref ref-type="bibr" rid="ref46">46</xref>]. This approach not only raises awareness but also fosters trust among nontechnical stakeholders, such as community members and health care practitioners. 
Past multidisciplinary engagements have proven valuable in addressing bias and transparency concerns in ML applications. For instance, the World Health Organization convened multistakeholder panels to assess AI-based disease surveillance tools, leading to refined ethical guidelines [<xref ref-type="bibr" rid="ref52">52</xref>]. Similarly, the Canadian AI for Public Health initiative facilitated workshops bringing together data scientists, epidemiologists, and policymakers to discuss bias mitigation strategies in ML-based public health interventions [<xref ref-type="bibr" rid="ref53">53</xref>]. A structured approach to future engagements could include regular stakeholder summits, interdisciplinary task forces, and community-centered consultations.</p></sec></sec><sec id="s6"><title>Summary</title><p>ML is rapidly advancing and holds potential for uses to improve the health of individuals and communities. However, these efforts must prioritize equity. Model developers, statisticians, epidemiologists, public health professionals, policymakers, and funders must collaborate to ensure that ML implementations avoid prejudice and discrimination while also enhancing human capabilities, connections, and knowledge in health and disease contexts. This approach aligns with the ethical integration of AI in health [<xref ref-type="bibr" rid="ref54">54</xref>].</p><p>Our recommendations align with existing frameworks. In the United States, our guidelines support the principles outlined in the Executive Order on Safe, Secure, and Trustworthy AI, which emphasizes the importance of equity, privacy safeguards, and risk mitigation in health-related AI applications [<xref ref-type="bibr" rid="ref55">55</xref>]. 
Similarly, our focus on transparency and bias mitigation aligns with the European Union&#x2019;s AI Act and GDPR, which collectively promote ethical AI deployment, data protection, and algorithmic accountability [<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref57">57</xref>]. Our recommendations complement the UK AI Regulatory Principles, which emphasize fairness, accountability, and transparency, particularly in public sector applications [<xref ref-type="bibr" rid="ref58">58</xref>]. Finally, our guidelines build upon principles from the Pan-Canadian Artificial Intelligence Strategy [<xref ref-type="bibr" rid="ref59">59</xref>], which highlights equity, interdisciplinarity, and inclusivity as core pillars of responsible AI adoption in public health. These country-specific frameworks emphasize the importance of adapting ethical ML guidelines to regional regulatory and social contexts, while ensuring global alignment on core principles such as equity, transparency, and data privacy. Our recommendations add to these discussions by offering a public health&#x2013;specific lens, focusing on bias mitigation in population-level models, fostering community partnerships, and emphasizing the impact of social determinants of health in ML implementation.</p><p>We had a diverse range of disciplines and approaches represented in our team, including AI, ML, computer science, population health, epidemiology, ethics, and AI applications in health. A limitation of this work was that we have adapted methodologies initially designed for developing clinical practice guidelines, as there is no clear guidance for developing ML applications in population and public health. In LMICs, there might be limited resources available for adequately monitoring and documenting public health interventions that use ML. 
While our recommendations aim to promote equity and enhance the integration of ML models in LMICs for population and public health purposes, they may not cover all possible use cases. Implementing these guidelines in diverse settings presents several challenges, including variations in data quality, resource availability, and stakeholder engagement. Addressing these challenges is essential to ensure that ML models enhance public health outcomes equitably.</p></sec><sec id="s7"><title>Limitations</title><p>Some key limitations include difficulties in engaging communities considered disadvantaged, ensuring data representativeness, and maintaining transparency with limited resources. Operationalizing these recommendations will require adaptable protocols, increased local capacity, and collaborative efforts. For example, developing simplified, context-specific inclusivity checklists and leveraging international partnerships can help overcome resource gaps. Ultimately, ongoing evaluation and stakeholder input are crucial for refining these guidelines and ensuring their effectiveness across varied public health contexts.</p></sec><sec id="s8" sec-type="conclusions"><title>Conclusions</title><p>Without adequate bias prevention and mitigation during ML model development and implementation, ML applications in population and public health contexts could worsen existing health disparities or even contribute to new ones. Similar to the development of health policy aimed at promoting equity, it is crucial to carefully assess ML innovations before, during, and after their development and deployment. This ensures that model design and delivery are equitable and based on data representing all groups. Achieving equity requires adhering to ethical principles of equity, transparency, and engagement, as well as multidisciplinary efforts involving both model developers and population health practitioners committed to bias prevention and mitigation standards. 
Monitoring the outcomes of such adherence, including the guidelines proposed here, can promote less biased use of ML to inform policy. Future work should include piloting these guidelines in a range of public health settings, including LMICs, and developing practical tools to assess their effectiveness. Evaluation efforts could involve tracking the diversity of stakeholders involved in model development and testing, measuring improvements in model accuracy for underrepresented groups, and assessing whether decisions based on these models contribute to more equitable health outcomes. Ongoing assessment and adaptation of these guidelines will be crucial for ensuring responsible and fair use of ML in public health. ML is not just a rapidly evolving technology but also a tool that can promote equity in population and public health if a commitment to mitigating bias is maintained by all stakeholders involved in its design and delivery.</p></sec></body><back><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">CARE</term><def><p>Collective Benefit, Authority to Control, Responsibility, and Ethics</p></def></def-item><def-item><term id="abb3">FAIR</term><def><p>Findable, Accessible, Interoperable, and Reusable</p></def></def-item><def-item><term id="abb4">GDPR</term><def><p>General Data Protection Regulation</p></def></def-item><def-item><term id="abb5">GRADE</term><def><p>Grading of Recommendations Assessment, Development and Evaluation</p></def></def-item><def-item><term id="abb6">HIPAA</term><def><p>Health Insurance Portability and Accountability Act</p></def></def-item><def-item><term id="abb7">LMIC</term><def><p>low- and middle-income country</p></def></def-item><def-item><term id="abb8">ML</term><def><p>machine learning</p></def></def-item><def-item><term 
id="abb9">NCD</term><def><p>noncommunicable disease</p></def></def-item><def-item><term id="abb10">NICE</term><def><p>National Institute for Health and Care Excellence</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Beam</surname><given-names>AL</given-names> </name><name name-style="western"><surname>Kohane</surname><given-names>IS</given-names> </name></person-group><article-title>Big data and machine learning in health care</article-title><source>JAMA</source><year>2018</year><month>04</month><day>3</day><volume>319</volume><issue>13</issue><fpage>1317</fpage><lpage>1318</lpage><pub-id pub-id-type="doi">10.1001/jama.2017.18391</pub-id><pub-id pub-id-type="medline">29532063</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Friedman</surname><given-names>DJ</given-names> </name><name name-style="western"><surname>Starfield</surname><given-names>B</given-names> </name></person-group><article-title>Models of population health: their value for US public health practice, policy, and research</article-title><source>Am J Public Health</source><year>2003</year><month>03</month><volume>93</volume><issue>3</issue><fpage>366</fpage><lpage>369</lpage><pub-id pub-id-type="doi">10.2105/ajph.93.3.366</pub-id><pub-id pub-id-type="medline">12604473</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>James</surname><given-names>G</given-names> </name><name name-style="western"><surname>Witten</surname><given-names>D</given-names> </name><name name-style="western"><surname>Hastie</surname><given-names>T</given-names> </name><name 
name-style="western"><surname>Tibshirani</surname><given-names>R</given-names> </name></person-group><source>An Introduction to Statistical Learning</source><year>2013</year><publisher-name>Springer</publisher-name><pub-id pub-id-type="other">9781461471370</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Atasoy</surname><given-names>H</given-names> </name><name name-style="western"><surname>Greenwood</surname><given-names>BN</given-names> </name><name name-style="western"><surname>McCullough</surname><given-names>JS</given-names> </name></person-group><article-title>The digitization of patient care: a review of the effects of electronic health records on health care quality and utilization</article-title><source>Annu Rev Public Health</source><year>2019</year><month>04</month><day>1</day><volume>40</volume><fpage>487</fpage><lpage>500</lpage><pub-id pub-id-type="doi">10.1146/annurev-publhealth-040218-044206</pub-id><pub-id pub-id-type="medline">30566385</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lavigne</surname><given-names>M</given-names> </name><name name-style="western"><surname>Mussa</surname><given-names>F</given-names> </name><name name-style="western"><surname>Creatore</surname><given-names>MI</given-names> </name><name name-style="western"><surname>Hoffman</surname><given-names>SJ</given-names> </name><name name-style="western"><surname>Buckeridge</surname><given-names>DL</given-names> </name></person-group><article-title>A population health perspective on artificial intelligence</article-title><source>Healthc Manage Forum</source><year>2019</year><month>07</month><volume>32</volume><issue>4</issue><fpage>173</fpage><lpage>177</lpage><pub-id pub-id-type="doi">10.1177/0840470419848428</pub-id><pub-id 
pub-id-type="medline">31106580</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Aiello</surname><given-names>AE</given-names> </name><name name-style="western"><surname>Renson</surname><given-names>A</given-names> </name><name name-style="western"><surname>Zivich</surname><given-names>PN</given-names> </name></person-group><article-title>Social media- and internet-based disease surveillance for public health</article-title><source>Annu Rev Public Health</source><year>2020</year><month>04</month><day>2</day><volume>41</volume><issue>1</issue><fpage>101</fpage><lpage>118</lpage><pub-id pub-id-type="doi">10.1146/annurev-publhealth-040119-094402</pub-id><pub-id pub-id-type="medline">31905322</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Silva</surname><given-names>KD</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>WK</given-names> </name><name name-style="western"><surname>Forbes</surname><given-names>A</given-names> </name><name name-style="western"><surname>Demmer</surname><given-names>RT</given-names> </name><name name-style="western"><surname>Barton</surname><given-names>C</given-names> </name><name name-style="western"><surname>Enticott</surname><given-names>J</given-names> </name></person-group><article-title>Use and performance of machine learning models for type 2 diabetes prediction in community settings: a systematic review and meta-analysis</article-title><source>Int J Med Inform</source><year>2020</year><month>11</month><volume>143</volume><fpage>104268</fpage><pub-id pub-id-type="doi">10.1016/j.ijmedinf.2020.104268</pub-id><pub-id pub-id-type="medline">32950874</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Allem</surname><given-names>JP</given-names> </name><name name-style="western"><surname>Ferrara</surname><given-names>E</given-names> </name><name name-style="western"><surname>Uppu</surname><given-names>SP</given-names> </name><name name-style="western"><surname>Cruz</surname><given-names>TB</given-names> </name><name name-style="western"><surname>Unger</surname><given-names>JB</given-names> </name></person-group><article-title>E-Cigarette surveillance with social media data: social bots, emerging topics, and trends</article-title><source>JMIR Public Health Surveill</source><year>2017</year><month>12</month><day>20</day><volume>3</volume><issue>4</issue><fpage>e98</fpage><pub-id pub-id-type="doi">10.2196/publichealth.8641</pub-id><pub-id pub-id-type="medline">29263018</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="web"><article-title>AI, machine learning and the potential impacts on the practice of family medicine</article-title><source>AMS</source><access-date>2025-10-01</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.ams-inc.on.ca/resource/cfpc-briefing-paper/">https://www.ams-inc.on.ca/resource/cfpc-briefing-paper/</ext-link></comment></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ghassemi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Naumann</surname><given-names>T</given-names> </name><name name-style="western"><surname>Schulam</surname><given-names>P</given-names> </name><name name-style="western"><surname>Beam</surname><given-names>AL</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>IY</given-names> </name><name name-style="western"><surname>Ranganath</surname><given-names>R</given-names> </name></person-group><article-title>Practical 
guidance on artificial intelligence for health-care data</article-title><source>Lancet Digit Health</source><year>2019</year><month>08</month><volume>1</volume><issue>4</issue><fpage>e157</fpage><lpage>e159</lpage><pub-id pub-id-type="doi">10.1016/S2589-7500(19)30084-6</pub-id><pub-id pub-id-type="medline">33323184</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wilder</surname><given-names>J</given-names> </name><name name-style="western"><surname>Saraswathula</surname><given-names>A</given-names> </name><name name-style="western"><surname>Hasselblad</surname><given-names>V</given-names> </name><name name-style="western"><surname>Muir</surname><given-names>A</given-names> </name></person-group><article-title>A systematic review of race and ethnicity in Hepatitis C clinical trial enrollment</article-title><source>J Natl Med Assoc</source><year>2016</year><month>02</month><volume>108</volume><issue>1</issue><fpage>24</fpage><lpage>29</lpage><pub-id pub-id-type="doi">10.1016/j.jnma.2015.12.004</pub-id><pub-id pub-id-type="medline">26928485</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Buckeridge</surname><given-names>DL</given-names> </name></person-group><article-title>Precision, equity, and public health and epidemiology informatics - a scoping review</article-title><source>Yearb Med Inform</source><year>2020</year><month>08</month><volume>29</volume><issue>1</issue><fpage>226</fpage><lpage>230</lpage><pub-id pub-id-type="doi">10.1055/s-0040-1701989</pub-id><pub-id pub-id-type="medline">32823320</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>R&#x00F6;&#x00F6;sli</surname><given-names>E</given-names> </name><name name-style="western"><surname>Rice</surname><given-names>B</given-names> </name><name name-style="western"><surname>Hernandez-Boussard</surname><given-names>T</given-names> </name></person-group><article-title>Bias at warp speed: how AI may contribute to the disparities gap in the time of COVID-19</article-title><source>J Am Med Inform Assoc</source><year>2021</year><month>01</month><day>15</day><volume>28</volume><issue>1</issue><fpage>190</fpage><lpage>192</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocaa210</pub-id><pub-id pub-id-type="medline">32805004</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="web"><person-group person-group-type="editor"><name name-style="western"><surname>Sch&#x00FC;nemann</surname><given-names>H</given-names> </name><name name-style="western"><surname>Bro&#x017C;ek</surname><given-names>J</given-names> </name><name name-style="western"><surname>Guyatt</surname><given-names>G</given-names> </name><name name-style="western"><surname>Oxman</surname><given-names>A</given-names> </name></person-group><article-title>GRADE handbook for grading quality of evidence and strength of recommendations</article-title><source>Grading of Recommendations Assessment, Development and Evaluation</source><year>2013</year><access-date>2025-10-01</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://gdt.gradepro.org/app/handbook/handbook.html">https://gdt.gradepro.org/app/handbook/handbook.html</ext-link></comment></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="web"><article-title>Developing NICE guidelines: the manual</article-title><source>National Institute for Health and Care Excellence</source><year>2014</year><access-date>2025-10-01</access-date><comment><ext-link ext-link-type="uri" 
xlink:href="https://www.nice.org.uk/media/default/about/what-we-do/our-programmes/developing-nice-guidelines-the-manual.pdf">https://www.nice.org.uk/media/default/about/what-we-do/our-programmes/developing-nice-guidelines-the-manual.pdf</ext-link></comment></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Caro</surname><given-names>JJ</given-names> </name><name name-style="western"><surname>Briggs</surname><given-names>AH</given-names> </name><name name-style="western"><surname>Siebert</surname><given-names>U</given-names> </name><name name-style="western"><surname>Kuntz</surname><given-names>KM</given-names> </name></person-group><article-title>Modeling good research practices&#x2014;overview</article-title><source>Med Decis Making</source><year>2012</year><month>09</month><volume>32</volume><issue>5</issue><fpage>667</fpage><lpage>677</lpage><pub-id pub-id-type="doi">10.1177/0272989X12454577</pub-id><pub-id pub-id-type="medline">22990082</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Eddy</surname><given-names>DM</given-names> </name><name name-style="western"><surname>Hollingworth</surname><given-names>W</given-names> </name><name name-style="western"><surname>Caro</surname><given-names>JJ</given-names> </name><etal/></person-group><article-title>Model transparency and validation: a report of the ISPOR-SMDM Modeling Good Research Practices Task Force--7</article-title><source>Value Health</source><year>2012</year><volume>15</volume><issue>6</issue><fpage>843</fpage><lpage>850</lpage><pub-id pub-id-type="doi">10.1016/j.jval.2012.04.012</pub-id><pub-id pub-id-type="medline">22999134</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="web"><person-group 
person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>NT</given-names> </name><name name-style="western"><surname>Resnick</surname><given-names>P</given-names> </name><name name-style="western"><surname>Barton</surname><given-names>G</given-names> </name></person-group><article-title>Algorithmic bias detection and mitigation: best practices and policies to reduce consumer harms</article-title><source>Brookings Institution</source><year>2019</year><access-date>2025-10-01</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.brookings.edu/articles/algorithmic-bias-detection-and-mitigation-best-practices-and-policies-to-reduce-consumer-harms/">https://www.brookings.edu/articles/algorithmic-bias-detection-and-mitigation-best-practices-and-policies-to-reduce-consumer-harms/</ext-link></comment></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Paprica</surname><given-names>PA</given-names> </name><name name-style="western"><surname>Sutherland</surname><given-names>E</given-names> </name><name name-style="western"><surname>Smith</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Essential requirements for establishing and operating data trusts</article-title><source>Int J Popul Data Sci</source><year>2020</year><volume>5</volume><issue>1</issue><pub-id pub-id-type="doi">10.23889/ijpds.v5i1.1353</pub-id><pub-id pub-id-type="medline">33644412</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="web"><article-title>Montreal declaration on responsible AI</article-title><source>Montr&#x00E9;al Declaration</source><year>2018</year><access-date>2019-01-24</access-date><comment><ext-link ext-link-type="uri" 
xlink:href="https://www.montrealdeclaration-responsibleai.com/">https://www.montrealdeclaration-responsibleai.com/</ext-link></comment></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Courtland</surname><given-names>R</given-names> </name></person-group><article-title>Bias detectives: the researchers striving to make algorithms fair</article-title><source>Nature</source><year>2018</year><month>06</month><volume>558</volume><issue>7710</issue><fpage>357</fpage><lpage>360</lpage><pub-id pub-id-type="doi">10.1038/d41586-018-05469-3</pub-id><pub-id pub-id-type="medline">29925973</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="web"><article-title>Ethics and governance of artificial intelligence for health: WHO guidance</article-title><source>World Health Organization</source><access-date>2023-04-05</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.who.int/publications/i/item/9789240029200">https://www.who.int/publications/i/item/9789240029200</ext-link></comment></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wiens</surname><given-names>J</given-names> </name><name name-style="western"><surname>Price</surname><given-names>WN</given-names> </name><name name-style="western"><surname>Sjoding</surname><given-names>MW</given-names> </name></person-group><article-title>Diagnosing bias in data-driven algorithms for healthcare</article-title><source>Nat Med</source><year>2020</year><month>01</month><volume>26</volume><issue>1</issue><fpage>25</fpage><lpage>26</lpage><pub-id pub-id-type="doi">10.1038/s41591-019-0726-6</pub-id><pub-id pub-id-type="medline">31932798</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pfohl</surname><given-names>SR</given-names> </name><name name-style="western"><surname>Foryciarz</surname><given-names>A</given-names> </name><name name-style="western"><surname>Shah</surname><given-names>NH</given-names> </name></person-group><article-title>An empirical characterization of fair machine learning for clinical risk prediction</article-title><source>J Biomed Inform</source><year>2021</year><month>01</month><volume>113</volume><fpage>103621</fpage><pub-id pub-id-type="doi">10.1016/j.jbi.2020.103621</pub-id><pub-id pub-id-type="medline">33220494</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Birdi</surname><given-names>S</given-names> </name><name name-style="western"><surname>Rabet</surname><given-names>R</given-names> </name><name name-style="western"><surname>Durant</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Bias in machine learning applications to address non-communicable diseases at a population-level: a scoping review</article-title><source>BMC Public Health</source><year>2024</year><volume>24</volume><issue>1</issue><fpage>1</fpage><lpage>16</lpage><pub-id pub-id-type="doi">10.1186/s12889-024-21081-9</pub-id><pub-id pub-id-type="medline">39732655</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shergill</surname><given-names>M</given-names> </name><name name-style="western"><surname>Durant</surname><given-names>S</given-names> </name><name name-style="western"><surname>Birdi</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Machine learning used to study risk factors for chronic diseases: A scoping
review</article-title><source>Can J Public Health</source><year>2025</year><month>06</month><day>11</day><pub-id pub-id-type="doi">10.17269/s41997-025-01059-9</pub-id><pub-id pub-id-type="medline">40498391</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Christie</surname><given-names>GA</given-names> </name><name name-style="western"><surname>Nguyen</surname><given-names>TT</given-names> </name><name name-style="western"><surname>Freeman</surname><given-names>JD</given-names> </name><name name-style="western"><surname>Hsu</surname><given-names>EB</given-names> </name></person-group><article-title>Applications of artificial intelligence and machine learning in disasters and public health emergencies</article-title><source>Disaster Med Public Health Prep</source><year>2022</year><month>08</month><volume>16</volume><issue>4</issue><fpage>1674</fpage><lpage>1681</lpage><pub-id pub-id-type="doi">10.1017/dmp.2021.125</pub-id><pub-id pub-id-type="medline">34134815</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bozkurt</surname><given-names>S</given-names> </name><name name-style="western"><surname>Cahan</surname><given-names>EM</given-names> </name><name name-style="western"><surname>Seneviratne</surname><given-names>MG</given-names> </name><etal/></person-group><article-title>Reporting of demographic data and representativeness in machine learning models using electronic health records</article-title><source>J Am Med Inform Assoc</source><year>2020</year><month>12</month><day>9</day><volume>27</volume><issue>12</issue><fpage>1878</fpage><lpage>1884</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocaa164</pub-id><pub-id 
pub-id-type="medline">32935131</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Larrazabal</surname><given-names>AJ</given-names> </name><name name-style="western"><surname>Nieto</surname><given-names>N</given-names> </name><name name-style="western"><surname>Peterson</surname><given-names>V</given-names> </name><name name-style="western"><surname>Milone</surname><given-names>DH</given-names> </name><name name-style="western"><surname>Ferrante</surname><given-names>E</given-names> </name></person-group><article-title>Gender imbalance in medical imaging datasets produces biased classifiers for computer-aided diagnosis</article-title><source>Proc Natl Acad Sci U S A</source><year>2020</year><month>06</month><day>9</day><volume>117</volume><issue>23</issue><fpage>12592</fpage><lpage>12594</lpage><pub-id pub-id-type="doi">10.1073/pnas.1919012117</pub-id><pub-id pub-id-type="medline">32457147</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wark</surname><given-names>K</given-names> </name><name name-style="western"><surname>Woodbury</surname><given-names>RB</given-names> </name><name name-style="western"><surname>LaBrie</surname><given-names>S</given-names> </name><name name-style="western"><surname>Trainor</surname><given-names>J</given-names> </name><name name-style="western"><surname>Freeman</surname><given-names>M</given-names> </name><name name-style="western"><surname>Avey</surname><given-names>JP</given-names> </name></person-group><article-title>Engaging stakeholders in social determinants of health quality improvement efforts</article-title><source>Perm J</source><year>2022</year><month>12</month><day>19</day><volume>26</volume><issue>4</issue><fpage>28</fpage><lpage>38</lpage><pub-id 
pub-id-type="doi">10.7812/TPP/22.035</pub-id><pub-id pub-id-type="medline">36154895</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Jones</surname><given-names>N</given-names> </name><name name-style="western"><surname>Harrison</surname><given-names>J</given-names> </name><name name-style="western"><surname>Aguiar</surname><given-names>R</given-names> </name><name name-style="western"><surname>Munro</surname><given-names>L</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Nelson</surname><given-names>G</given-names> </name><name name-style="western"><surname>Kloos</surname><given-names>B</given-names> </name><name name-style="western"><surname>Ornelas</surname><given-names>J</given-names></name></person-group><article-title>Transforming research for transformative change in mental health: toward the future</article-title><source>Community Psychology and Community Mental Health</source><year>2024</year><publisher-name>Oxford Academic Press</publisher-name><fpage>351</fpage><lpage>372</lpage><pub-id pub-id-type="doi">10.1093/acprof:oso/9780199362424.001.0001</pub-id><pub-id pub-id-type="other">9780199362424</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Vokinger</surname><given-names>KN</given-names> </name><name name-style="western"><surname>Feuerriegel</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kesselheim</surname><given-names>AS</given-names> </name></person-group><article-title>Mitigating bias in machine learning for medicine</article-title><source>Commun Med (Lond)</source><year>2021</year><month>08</month><day>23</day><volume>1</volume><issue>1</issue><fpage>25</fpage><pub-id 
pub-id-type="doi">10.1038/s43856-021-00028-w</pub-id><pub-id pub-id-type="medline">34522916</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Owoyemi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Owoyemi</surname><given-names>J</given-names> </name><name name-style="western"><surname>Osiyemi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Boyd</surname><given-names>A</given-names> </name></person-group><article-title>Artificial intelligence for healthcare in Africa</article-title><source>Front Digit Health</source><year>2020</year><volume>2</volume><fpage>6</fpage><pub-id pub-id-type="doi">10.3389/fdgth.2020.00006</pub-id><pub-id pub-id-type="medline">34713019</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Alaran</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Lawal</surname><given-names>SK</given-names> </name><name name-style="western"><surname>Jiya</surname><given-names>MH</given-names> </name><etal/></person-group><article-title>Challenges and opportunities of artificial intelligence in African health space</article-title><source>Digit Health</source><year>2025</year><volume>11</volume><fpage>20552076241305915</fpage><pub-id pub-id-type="doi">10.1177/20552076241305915</pub-id><pub-id pub-id-type="medline">39839959</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tanui</surname><given-names>CK</given-names> </name><name name-style="western"><surname>Ndembi</surname><given-names>N</given-names> </name><name name-style="western"><surname>Kebede</surname><given-names>Y</given-names> 
</name><name name-style="western"><surname>Tessema</surname><given-names>SK</given-names> </name></person-group><article-title>Artificial intelligence to transform public health in Africa</article-title><source>Lancet Infect Dis</source><year>2024</year><month>09</month><volume>24</volume><issue>9</issue><fpage>e542</fpage><pub-id pub-id-type="doi">10.1016/S1473-3099(24)00435-3</pub-id><pub-id pub-id-type="medline">39053483</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Baxter</surname><given-names>MS</given-names> </name><name name-style="western"><surname>White</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lahti</surname><given-names>M</given-names> </name><name name-style="western"><surname>Murto</surname><given-names>T</given-names> </name><name name-style="western"><surname>Evans</surname><given-names>J</given-names> </name></person-group><article-title>Machine learning in a time of COVID-19 - can machine learning support community health workers (CHWs) in low and middle income countries (LMICs) in the new normal?</article-title><source>J Glob Health</source><year>2021</year><month>01</month><day>16</day><volume>11</volume><issue>3017</issue><fpage>03017</fpage><pub-id pub-id-type="doi">10.7189/jogh.11.03017</pub-id><pub-id pub-id-type="medline">33643627</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Aitsi-Selmi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Murray</surname><given-names>V</given-names> </name></person-group><article-title>The Sendai framework: disaster risk reduction through a health lens</article-title><source>Bull World Health 
Organ</source><year>2015</year><month>06</month><day>1</day><volume>93</volume><issue>6</issue><fpage>362</fpage><pub-id pub-id-type="doi">10.2471/BLT.15.157362</pub-id><pub-id pub-id-type="medline">26240454</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Carroll</surname><given-names>SR</given-names> </name><name name-style="western"><surname>Herczog</surname><given-names>E</given-names> </name><name name-style="western"><surname>Hudson</surname><given-names>M</given-names> </name><name name-style="western"><surname>Russell</surname><given-names>K</given-names> </name><name name-style="western"><surname>Stall</surname><given-names>S</given-names> </name></person-group><article-title>Operationalizing the CARE and FAIR principles for Indigenous data futures</article-title><source>Sci Data</source><year>2021</year><month>04</month><day>16</day><volume>8</volume><issue>1</issue><fpage>108</fpage><pub-id pub-id-type="doi">10.1038/s41597-021-00892-0</pub-id><pub-id pub-id-type="medline">33863927</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Xiao</surname><given-names>C</given-names> </name><name name-style="western"><surname>Zhou</surname><given-names>J</given-names> </name><name name-style="western"><surname>Huang</surname><given-names>J</given-names> </name><etal/></person-group><article-title>C-watcher: a framework for early detection of high-risk neighborhoods ahead of COVID-19 outbreak</article-title><source>Proc AAAI Conf Artif Intell</source><year>2021</year><volume>35</volume><issue>6</issue><fpage>4892</fpage><lpage>4900</lpage><pub-id pub-id-type="doi">10.1609/aaai.v35i6.16622</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>De Lusignan</surname><given-names>S</given-names> </name><name name-style="western"><surname>Liyanage</surname><given-names>H</given-names> </name><name name-style="western"><surname>Di Iorio</surname><given-names>CT</given-names> </name><name name-style="western"><surname>Chan</surname><given-names>T</given-names> </name><name name-style="western"><surname>Liaw</surname><given-names>ST</given-names> </name></person-group><article-title>Using routinely collected health data for surveillance, quality improvement and research: framework and key questions to assess ethics, privacy and data access</article-title><source>J Innov Health Inform</source><year>2016</year><month>01</month><day>19</day><volume>22</volume><issue>4</issue><fpage>426</fpage><lpage>432</lpage><pub-id pub-id-type="doi">10.14236/jhi.v22i4.845</pub-id><pub-id pub-id-type="medline">26855276</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Floridi</surname><given-names>L</given-names> </name><name name-style="western"><surname>Cowls</surname><given-names>J</given-names> </name><name name-style="western"><surname>Beltrametti</surname><given-names>M</given-names> </name><etal/></person-group><article-title>AI4People-an ethical framework for a good AI society: opportunities, risks, principles, and recommendations</article-title><source>Minds Mach (Dordr)</source><year>2018</year><volume>28</volume><issue>4</issue><fpage>689</fpage><lpage>707</lpage><pub-id pub-id-type="doi">10.1007/s11023-018-9482-5</pub-id><pub-id pub-id-type="medline">30930541</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>IY</given-names> </name><name 
name-style="western"><surname>Pierson</surname><given-names>E</given-names> </name><name name-style="western"><surname>Rose</surname><given-names>S</given-names> </name><name name-style="western"><surname>Joshi</surname><given-names>S</given-names> </name><name name-style="western"><surname>Ferryman</surname><given-names>K</given-names> </name><name name-style="western"><surname>Ghassemi</surname><given-names>M</given-names> </name></person-group><article-title>Ethical machine learning in healthcare</article-title><source>Annu Rev Biomed Data Sci</source><year>2021</year><month>07</month><volume>4</volume><fpage>123</fpage><lpage>144</lpage><pub-id pub-id-type="doi">10.1146/annurev-biodatasci-092820-114757</pub-id><pub-id pub-id-type="medline">34396058</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Simon</surname><given-names>G</given-names> </name><name name-style="western"><surname>Aliferis</surname><given-names>C</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Simon</surname><given-names>GJ</given-names> </name><name name-style="western"><surname>Aliferis</surname><given-names>C</given-names> </name></person-group><article-title>Reporting standards, certification/accreditation, and reproducibility</article-title><source>Artificial Intelligence and Machine Learning in Health Care and Medical Sciences: Best Practices and Pitfalls</source><year>2024</year><publisher-name>Springer</publisher-name><fpage>693</fpage><lpage>707</lpage><pub-id pub-id-type="doi">10.1007/978-3-031-39355-6_17</pub-id><pub-id pub-id-type="other">9783031393556</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Char</surname><given-names>DS</given-names> </name><name 
name-style="western"><surname>Shah</surname><given-names>NH</given-names> </name><name name-style="western"><surname>Magnus</surname><given-names>D</given-names> </name></person-group><article-title>Implementing machine learning in health care - addressing ethical challenges</article-title><source>N Engl J Med</source><year>2018</year><month>03</month><day>15</day><volume>378</volume><issue>11</issue><fpage>981</fpage><lpage>983</lpage><pub-id pub-id-type="doi">10.1056/NEJMp1714229</pub-id><pub-id pub-id-type="medline">29539284</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gijsberts</surname><given-names>CM</given-names> </name><name name-style="western"><surname>Groenewegen</surname><given-names>KA</given-names> </name><name name-style="western"><surname>Hoefer</surname><given-names>IE</given-names> </name><etal/></person-group><article-title>Race/ethnic differences in the associations of the Framingham risk factors with carotid IMT and cardiovascular events</article-title><source>PLoS ONE</source><year>2015</year><volume>10</volume><issue>7</issue><fpage>e0132321</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0132321</pub-id><pub-id pub-id-type="medline">26134404</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Edgell</surname><given-names>C</given-names> </name><name name-style="western"><surname>Rosenberg</surname><given-names>A</given-names> </name></person-group><article-title>Putting plain language summaries into perspective</article-title><source>Curr Med Res Opin</source><year>2022</year><month>06</month><volume>38</volume><issue>6</issue><fpage>871</fpage><lpage>874</lpage><pub-id pub-id-type="doi">10.1080/03007995.2022.2058812</pub-id><pub-id 
pub-id-type="medline">35400253</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Kiseleva</surname><given-names>A</given-names> </name><name name-style="western"><surname>De Hert</surname><given-names>P</given-names> </name></person-group><article-title>Creating a European health data space. Obstacles in four key legal areas</article-title><source>SSRN Journal</source><comment>Preprint posted online on May 18, 2021</comment><pub-id pub-id-type="doi">10.2139/ssrn.3846781</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Haneef</surname><given-names>R</given-names> </name><name name-style="western"><surname>Tijhuis</surname><given-names>M</given-names> </name><name name-style="western"><surname>Thi&#x00E9;baut</surname><given-names>R</given-names> </name><etal/></person-group><article-title>Methodological guidelines to estimate population-based health indicators using linked data and/or machine learning techniques</article-title><source>Arch Public Health</source><year>2022</year><month>01</month><day>4</day><volume>80</volume><issue>1</issue><fpage>9</fpage><pub-id pub-id-type="doi">10.1186/s13690-021-00770-6</pub-id><pub-id pub-id-type="medline">34983651</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhao</surname><given-names>AP</given-names> </name><name name-style="western"><surname>Li</surname><given-names>S</given-names> </name><name name-style="western"><surname>Cao</surname><given-names>Z</given-names> </name><etal/></person-group><article-title>AI for science: predicting infectious diseases</article-title><source>J Saf Sci 
Resil</source><year>2024</year><month>06</month><volume>5</volume><issue>2</issue><fpage>130</fpage><lpage>146</lpage><pub-id pub-id-type="doi">10.1016/j.jnlssr.2024.02.002</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>HE</given-names> </name><name name-style="western"><surname>Weiner</surname><given-names>JP</given-names> </name><name name-style="western"><surname>Saria</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kharrazi</surname><given-names>H</given-names> </name></person-group><article-title>Evaluating algorithmic bias in 30-day hospital readmission models: retrospective analysis</article-title><source>J Med Internet Res</source><year>2024</year><month>04</month><day>18</day><volume>26</volume><issue>1</issue><fpage>e47125</fpage><pub-id pub-id-type="doi">10.2196/47125</pub-id><pub-id pub-id-type="medline">38422347</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Musbahi</surname><given-names>O</given-names> </name><name name-style="western"><surname>Syed</surname><given-names>L</given-names> </name><name name-style="western"><surname>Le Feuvre</surname><given-names>P</given-names> </name><name name-style="western"><surname>Cobb</surname><given-names>J</given-names> </name><name name-style="western"><surname>Jones</surname><given-names>G</given-names> </name></person-group><article-title>Public patient views of artificial intelligence in healthcare: a nominal group technique study</article-title><source>Digit Health</source><year>2021</year><volume>7</volume><fpage>20552076211063682</fpage><pub-id pub-id-type="doi">10.1177/20552076211063682</pub-id><pub-id pub-id-type="medline">34950499</pub-id></nlm-citation></ref><ref 
id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Blouin Genest</surname><given-names>G</given-names> </name></person-group><article-title>World Health Organization and disease surveillance: jeopardizing global public health?</article-title><source>Health (London)</source><year>2015</year><month>11</month><volume>19</volume><issue>6</issue><fpage>595</fpage><lpage>614</lpage><pub-id pub-id-type="doi">10.1177/1363459314561771</pub-id><pub-id pub-id-type="medline">25504474</pub-id></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fisher</surname><given-names>S</given-names> </name><name name-style="western"><surname>Rosella</surname><given-names>LC</given-names> </name></person-group><article-title>Priorities for successful use of artificial intelligence by public health organizations: a literature review</article-title><source>BMC Public Health</source><year>2022</year><month>11</month><day>22</day><volume>22</volume><issue>1</issue><fpage>2146</fpage><pub-id pub-id-type="doi">10.1186/s12889-022-14422-z</pub-id><pub-id pub-id-type="medline">36419010</pub-id></nlm-citation></ref><ref id="ref54"><label>54</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Russo</surname><given-names>F</given-names> </name><name name-style="western"><surname>Schliesser</surname><given-names>E</given-names> </name><name name-style="western"><surname>Wagemans</surname><given-names>J</given-names> </name></person-group><article-title>Connecting ethics and epistemology of AI</article-title><source>AI &#x0026; Soc</source><year>2024</year><month>08</month><volume>39</volume><issue>4</issue><fpage>1585</fpage><lpage>1603</lpage><pub-id pub-id-type="doi">10.1007/s00146-022-01617-6</pub-id></nlm-citation></ref><ref 
id="ref55"><label>55</label><nlm-citation citation-type="web"><article-title>Safe, secure, and trustworthy development and use of artificial intelligence</article-title><source>Federal Register</source><access-date>2025-05-09</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.federalregister.gov/documents/2023/11/01/2023-24283/safe-secure-and-trustworthy-development-and-use-of-artificial-intelligence">https://www.federalregister.gov/documents/2023/11/01/2023-24283/safe-secure-and-trustworthy-development-and-use-of-artificial-intelligence</ext-link></comment></nlm-citation></ref><ref id="ref56"><label>56</label><nlm-citation citation-type="web"><article-title>Regulation (EU) 2024/1689 of the European Parliament and of the Council of 13 June 2024 laying down harmonised rules on artificial intelligence and amending regulations (EC) no 300/2008, (EU) no 167/2013, (EU) no 168/2013, (EU) 2018/858, (EU) 2018/1139 and (EU) 2019/2144 and directives 2014/90/EU, (EU) 2016/797 and (EU) 2020/1828 (artificial intelligence act)</article-title><source>European Union</source><access-date>2025-05-09</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://eur-lex.europa.eu/eli/reg/2024/1689/oj">https://eur-lex.europa.eu/eli/reg/2024/1689/oj</ext-link></comment></nlm-citation></ref><ref id="ref57"><label>57</label><nlm-citation citation-type="web"><article-title>General Data Protection Regulation (GDPR) &#x2013; legal text</article-title><source>Intersoft Consulting Services</source><access-date>2025-05-09</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://gdpr-info.eu/">https://gdpr-info.eu/</ext-link></comment></nlm-citation></ref><ref id="ref58"><label>58</label><nlm-citation citation-type="web"><article-title>Implementing the UK&#x2019;s AI regulatory principles: initial guidance for regulators</article-title><source>Government of UK</source><year>2024</year><access-date>2025-10-01</access-date><comment><ext-link 
ext-link-type="uri" xlink:href="https://www.gov.uk/government/publications/implementing-the-uks-ai-regulatory-principles-initial-guidance-for-regulators">https://www.gov.uk/government/publications/implementing-the-uks-ai-regulatory-principles-initial-guidance-for-regulators</ext-link></comment></nlm-citation></ref><ref id="ref59"><label>59</label><nlm-citation citation-type="web"><article-title>Pan-Canadian artificial intelligence strategy</article-title><source>Government of Canada</source><access-date>2025-10-01</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://ised-isde.canada.ca/site/ai-strategy/en">https://ised-isde.canada.ca/site/ai-strategy/en</ext-link></comment></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1 </label><p>Guideline team composition.</p><media xlink:href="publichealth_v11i1e68952_app1.docx" xlink:title="DOCX File, 16 KB"/></supplementary-material></app-group></back></article>