<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v3.0 20080202//EN" "journalpublishing3.dtd">
<article xml:lang="en" article-type="research-article" xmlns:xlink="http://www.w3.org/1999/xlink">
<?release-delay 0|0?>
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">MCO</journal-id>
<journal-title-group>
<journal-title>Molecular and Clinical Oncology</journal-title>
</journal-title-group>
<issn pub-type="ppub">2049-9450</issn>
<issn pub-type="epub">2049-9469</issn>
<publisher>
<publisher-name>D.A. Spandidos</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">MCO-16-2-02460</article-id>
<article-id pub-id-type="doi">10.3892/mco.2021.2460</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Articles</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>An artificial intelligence-assisted diagnostic system improves the accuracy of image diagnosis of uterine cervical lesions</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Ito</surname><given-names>Yu</given-names></name>
<xref rid="af1-MCO-16-2-02460" ref-type="aff">1</xref>
<xref rid="fn1-MCO-16-2-02460" ref-type="author-notes">&#x002A;</xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Miyoshi</surname><given-names>Ai</given-names></name>
<xref rid="af1-MCO-16-2-02460" ref-type="aff">1</xref>
<xref rid="fn1-MCO-16-2-02460" ref-type="author-notes">&#x002A;</xref>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Ueda</surname><given-names>Yutaka</given-names></name>
<xref rid="af1-MCO-16-2-02460" ref-type="aff">1</xref>
<xref rid="c1-MCO-16-2-02460" ref-type="corresp"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Tanaka</surname><given-names>Yusuke</given-names></name>
<xref rid="af1-MCO-16-2-02460" ref-type="aff">1</xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Nakae</surname><given-names>Ruriko</given-names></name>
<xref rid="af1-MCO-16-2-02460" ref-type="aff">1</xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Morimoto</surname><given-names>Akiko</given-names></name>
<xref rid="af1-MCO-16-2-02460" ref-type="aff">1</xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Shiomi</surname><given-names>Mayu</given-names></name>
<xref rid="af1-MCO-16-2-02460" ref-type="aff">1</xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Enomoto</surname><given-names>Takayuki</given-names></name>
<xref rid="af2-MCO-16-2-02460" ref-type="aff">2</xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Sekine</surname><given-names>Masayuki</given-names></name>
<xref rid="af2-MCO-16-2-02460" ref-type="aff">2</xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Sasagawa</surname><given-names>Toshiyuki</given-names></name>
<xref rid="af3-MCO-16-2-02460" ref-type="aff">3</xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Yoshino</surname><given-names>Kiyoshi</given-names></name>
<xref rid="af4-MCO-16-2-02460" ref-type="aff">4</xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Harada</surname><given-names>Hiroshi</given-names></name>
<xref rid="af4-MCO-16-2-02460" ref-type="aff">4</xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Nakamura</surname><given-names>Takafumi</given-names></name>
<xref rid="af5-MCO-16-2-02460" ref-type="aff">5</xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Murata</surname><given-names>Takuya</given-names></name>
<xref rid="af5-MCO-16-2-02460" ref-type="aff">5</xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Hiramatsu</surname><given-names>Keizo</given-names></name>
<xref rid="af6-MCO-16-2-02460" ref-type="aff">6</xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Saito</surname><given-names>Junko</given-names></name>
<xref rid="af7-MCO-16-2-02460" ref-type="aff">7</xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Yagi</surname><given-names>Junko</given-names></name>
<xref rid="af8-MCO-16-2-02460" ref-type="aff">8</xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Tanaka</surname><given-names>Yoshiaki</given-names></name>
<xref rid="af9-MCO-16-2-02460" ref-type="aff">9</xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Kimura</surname><given-names>Tadashi</given-names></name>
<xref rid="af1-MCO-16-2-02460" ref-type="aff">1</xref>
</contrib>
</contrib-group>
<aff id="af1-MCO-16-2-02460"><label>1</label>Department of Obstetrics and Gynecology, Osaka University Graduate School of Medicine, Suita, Osaka 567-0871, Japan</aff>
<aff id="af2-MCO-16-2-02460"><label>2</label>Department of Obstetrics and Gynecology, Niigata University Graduate School of Medicine, Chuo-ku, Niigata 951-8520, Japan</aff>
<aff id="af3-MCO-16-2-02460"><label>3</label>Department of Obstetrics and Gynecology, Kanazawa Medical University, Uchinada, Ishikawa 920-0293, Japan</aff>
<aff id="af4-MCO-16-2-02460"><label>4</label>Department of Obstetrics and Gynecology, University of Occupational and Environmental Health, Kitakyushu, Fukuoka 807-8556, Japan</aff>
<aff id="af5-MCO-16-2-02460"><label>5</label>Department of Obstetrics and Gynecology, Kawasaki Medical University, Kurashiki, Okayama 701-0192, Japan</aff>
<aff id="af6-MCO-16-2-02460"><label>6</label>Hiramatsu Obstetrics and Gynecology Clinic, Kishiwada-shi, Osaka 583-0024, Japan</aff>
<aff id="af7-MCO-16-2-02460"><label>7</label>Saito Women Clinic, Yodogawa-ku, Osaka 532-0003, Japan</aff>
<aff id="af8-MCO-16-2-02460"><label>8</label>Ladies Clinic Yagi, Senboku-gunn, Osaka 595-0805, Japan</aff>
<aff id="af9-MCO-16-2-02460"><label>9</label>Maki Ladies Clinic, Ibaraki-shi, Osaka 567-0031, Japan</aff>
<author-notes>
<corresp id="c1-MCO-16-2-02460"><italic>Correspondence to:</italic> Dr Yutaka Ueda, Department of Obstetrics and Gynecology, Osaka University Graduate School of Medicine, 2-2 Yamadaoka, Suita, Osaka 567-0871, Japan <email>ZVF03563@nifty.ne.jp</email></corresp>
<fn id="fn1-MCO-16-2-02460"><p><sup>&#x002A;</sup>Contributed equally</p></fn>
<fn><p><italic>Abbreviations:</italic> AI, artificial intelligence; AISD, AI-assisted image diagnosis; CIN, cervical intraepithelial neoplasia</p></fn>
</author-notes>
<pub-date pub-type="ppub">
<month>02</month>
<year>2022</year></pub-date>
<pub-date pub-type="epub">
<day>08</day>
<month>12</month>
<year>2021</year></pub-date>
<volume>16</volume>
<issue>2</issue>
<elocation-id>27</elocation-id>
<history>
<date date-type="received">
<day>25</day>
<month>08</month>
<year>2021</year>
</date>
<date date-type="accepted">
<day>07</day>
<month>10</month>
<year>2021</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2021, Spandidos Publications</copyright-statement>
<copyright-year>2021</copyright-year>
</permissions>
<abstract>
<p>The present study created an artificial intelligence (AI)-based automated diagnostic system for uterine cervical lesions and assessed its performance in the image diagnosis of pathological cervical lesions. A total of 463 colposcopic images were analyzed. The traditional colposcopy diagnoses were compared with those obtained by AI image diagnosis. Next, 100 images were presented to a panel of 32 gynecologists, who independently examined each image in a blinded fashion and assigned it to one of four pathological categories. The 32 gynecologists then revisited their diagnosis for each image after being informed of the AI diagnosis. The present study assessed any changes in physician diagnosis and the accuracy of AI-assisted image diagnosis (AISD). The accuracy of AI was 57.8&#x0025; for normal, 35.4&#x0025; for cervical intraepithelial neoplasia (CIN)1, 40.5&#x0025; for CIN2-3 and 44.2&#x0025; for invasive cancer. The accuracy of gynecologist diagnoses from cervical pathological images, before knowing the AI image diagnosis, was 54.4&#x0025; for CIN2-3 and 38.9&#x0025; for invasive cancer. After learning of the AISD, their accuracy improved to 58.0&#x0025; for CIN2-3 and 48.5&#x0025; for invasive cancer. AI-assisted image diagnosis significantly improved the gynecologists&#x0027; diagnostic accuracy for invasive cancer (P&#x003C;0.01) and tended to improve their accuracy for CIN2-3 (P=0.14).</p>
</abstract>
<kwd-group>
<kwd>artificial intelligence</kwd>
<kwd>deep learning</kwd>
<kwd>image diagnosis</kwd>
<kwd>colposcopy</kwd>
<kwd>cervical intraepithelial neoplasia</kwd>
<kwd>cervical cancer</kwd>
</kwd-group>
<funding-group>
<funding-statement><bold>Funding:</bold> No funding was received.</funding-statement>
</funding-group>
</article-meta>
</front>
<body>
<sec sec-type="intro">
<title>Introduction</title>
<p>Every year, &#x007E;500,000 women worldwide are diagnosed with cervical cancer and &#x007E;270,000 women succumb to this disease (<xref rid="b1-MCO-16-2-02460" ref-type="bibr">1</xref>). The incidence of cervical cancer is higher in developing countries, which typically have far fewer medical resources (<xref rid="b2-MCO-16-2-02460" ref-type="bibr">2</xref>). As the world&#x0027;s population grows and ages, cases of cervical cancer have the potential to increase significantly.</p>
<p>In the traditional biopsy routine for cervical cancer diagnosis, gynecologists manually observe the uterine cervix with a colposcope and decide where to obtain a tissue sample for more detailed microscopic examination. There are two problems with this method. First, colposcopes are large and expensive. Second, gynecologists require a great deal of practical experience to decide correctly which part of the cervix is best to sample.</p>
<p>To address these shortcomings, the present study created a system of AI-assisted image diagnosis (AISD) for cervical lesions. This AI system can guide inexperienced practitioners in their selection of the best biopsy sites. If AISD for cervical lesions could be adopted into routine professional practice, the biopsy itself might become obsolete, or be used only when absolutely needed for a definitive diagnosis. This economical and simple improvement in diagnostic capabilities would reduce the burden on gynecologists and could be expanded to medical facilities in localities, regions and developing countries that have fewer medical resources. This would be conducive to the provision of proper medical treatment and to decreasing the overall cervical cancer burden (<xref rid="f1-MCO-16-2-02460" ref-type="fig">Fig. 1</xref>).</p>
<p>Tanaka <italic>et al</italic> (<xref rid="b3-MCO-16-2-02460" ref-type="bibr">3</xref>) were the first to report the diagnostic-assistance capability of a smartphone (Smartscopy; Apple Inc.). They found that it could detect 90.8&#x0025; of the pathological cervical lesions detectable by colposcopy, and that its detection sensitivity for lesions of cervical intraepithelial neoplasia (CIN)2 or greater was 92&#x0025;. From this work, they concluded that the imaging quality of Smartscopy is appropriate for diagnosis.</p>
<p>The ultimate aim of the present study was to achieve AISD for cervical lesions using images taken by Smartscopy. As a first step, the present study assessed the performance of AISD for cervical lesions using images taken by colposcope; this system could subsequently be applied to Smartscopy images.</p>
</sec>
<sec sec-type="Materials|methods">
<title>Materials and methods</title>
<p>The present study was a cooperative research project with Kyocera Corporation, a maker of advanced smartphones and AI software. The Osaka University Clinical Research Review Committee approved this research &#x005B;17257(T7)-8&#x005D;. All methods were performed in accordance with the relevant guidelines and regulations.</p>
<sec>
<title>Patients</title>
<p>Colposcopy and biopsy were performed on 463 patients by gynecologic oncologists at the Osaka University Hospital between January 2010 and August 2019. The median age of the patients was 46 years (range, 23-82 years). This was a retrospective study in which the patient data were fully de-identified. The present study was approved by the Institutional Review Board and the Ethics Committee of the Osaka University Hospital &#x005B;approval no. 17257(T7)-8&#x005D;. The researchers obtained informed consent from the participants of the anonymous questionnaire survey. The present study included only those who consented to participate.</p>
</sec>
<sec>
<title>Images of pathological lesions</title>
<p>A total of 463 images from 463 patients taken by colposcope were analyzed. The images were of pathological cervical lesions processed with acetic acid prior to biopsy. These images were cropped to 224x224 pixels and saved as JPEG files. Gynecologic oncologists annotated the images according to the pathological lesions (<xref rid="f2-MCO-16-2-02460" ref-type="fig">Fig. 2</xref>). The images were used retrospectively as the input data for deep learning by Kyocera. Of the 463 images, 120 were normal, 120 were CIN1, 113 were CIN2-3 and 110 were of invasive cancer (<xref rid="tI-MCO-16-2-02460" ref-type="table">Table I</xref>).</p>
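<p>As an illustration of this preprocessing step, the following is a minimal sketch in Python with the Pillow library; the directory names and the center-crop strategy are assumptions, as the exact tooling used in the study is not reported.</p>
<preformat>
# Minimal preprocessing sketch; paths and the crop strategy are hypothetical.
from pathlib import Path

from PIL import Image

SRC = Path("colposcopy_raw")   # assumed input directory of raw colposcopic images
DST = Path("colposcopy_224")   # assumed output directory of 224x224 JPEG files
DST.mkdir(exist_ok=True)

for src in SRC.glob("*.png"):
    img = Image.open(src).convert("RGB")
    side = min(img.size)                 # center-crop to a square first
    left = (img.width - side) // 2
    top = (img.height - side) // 2
    img = img.crop((left, top, left + side, top + side)).resize((224, 224))
    img.save(DST / (src.stem + ".jpg"), "JPEG")
</preformat>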
</sec>
<sec>
<title>Preparation</title>
<p>A randomly selected subset of 115 of the 463 images was employed as a &#x2018;test dataset&#x2019; and the remaining 348 images were used as the training dataset (<xref rid="tI-MCO-16-2-02460" ref-type="table">Table I</xref>). Next, 25&#x0025; of the training dataset was used for Group 1, 50&#x0025; for Group 2, 75&#x0025; for Group 3 and the entire training dataset for Group 4.</p>
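<p>The following sketch illustrates this split; the use of scikit-learn, the fixed random seed and the placeholder file names and labels are assumptions, as the study&#x0027;s actual tooling is not reported.</p>
<preformat>
# Dataset split sketch; scikit-learn is an assumption, the data are placeholders.
from sklearn.model_selection import train_test_split

paths = [f"img_{i:03d}.jpg" for i in range(463)]   # placeholder file names
labels = [i % 4 for i in range(463)]               # placeholder class ids 0-3

# Hold out 115 images as the test dataset, stratified by class.
train_paths, test_paths, train_y, test_y = train_test_split(
    paths, labels, test_size=115, stratify=labels, random_state=0)

# Nested subsets of the training dataset for Groups 1-4.
groups = {f"Group {i}": train_paths[:int(len(train_paths) * frac)]
          for i, frac in enumerate((0.25, 0.50, 0.75, 1.00), start=1)}
</preformat>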
<p>Data augmentation of the training dataset was also investigated. Tripling the training dataset by adding rotated or blurred copies of each image was tested, as was quadrupling it by additionally varying the hue, chroma (purity or intensity of color) and brightness (HSV), as is standard practice in computer image analysis.</p>
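<p>The sketch below illustrates this style of augmentation with Pillow; the rotation angle, blur radius and HSV shifts shown are arbitrary assumptions, since the exact augmentation parameters used in the study are not reported.</p>
<preformat>
# Augmentation sketch; the specific angle, radius and HSV shifts are assumptions.
from PIL import Image, ImageFilter

def augment(img):
    """Return the original image plus rotated, blurred and HSV-shifted copies."""
    rotated = img.rotate(15)                            # assumed rotation angle
    blurred = img.filter(ImageFilter.GaussianBlur(2))   # assumed blur radius
    h, s, v = img.convert("HSV").split()
    hsv = Image.merge("HSV", (
        h.point(lambda x: (x + 10) % 256),              # hue shift
        s.point(lambda x: min(255, int(x * 1.1))),      # chroma (saturation) boost
        v.point(lambda x: min(255, int(x * 1.1)))))     # brightness boost
    return [img, rotated, blurred, hsv.convert("RGB")]
</preformat>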
</sec>
<sec>
<title>AI image diagnosis</title>
<p>GoogLeNet (Inception v1) (<xref rid="b4-MCO-16-2-02460" ref-type="bibr">4</xref>), a convolutional neural network, was used. After the network was trained on the training dataset by deep learning, the traditional colposcopy diagnoses and the AI image diagnoses for the test dataset were compared.</p>
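<p>The network was trained with Kyocera&#x0027;s in-house software; as an illustrative stand-in only, the sketch below fine-tunes torchvision&#x0027;s pretrained GoogLeNet for the four diagnostic classes.</p>
<preformat>
# Illustrative stand-in for the in-house GoogLeNet (Inception v1) training setup.
import torch
import torch.nn as nn
from torchvision import models

NUM_CLASSES = 4  # normal, CIN1, CIN2-3, invasive cancer
model = models.googlenet(weights="IMAGENET1K_V1")        # Inception v1 backbone
model.fc = nn.Linear(model.fc.in_features, NUM_CLASSES)  # new 4-class head

optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
criterion = nn.CrossEntropyLoss()

def train_step(images, labels):
    """One gradient step on a batch of 224x224 RGB image tensors."""
    model.train()
    optimizer.zero_grad()
    loss = criterion(model(images), labels)
    loss.backward()
    optimizer.step()
    return loss.item()
</preformat>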
</sec>
<sec>
<title>Human accuracy with AI assistance</title>
<p>Between October 2020 and January 2021, 100 images (25 images for each pathology category) were presented to a panel of 32 gynecologists at the Osaka University Graduate School of Medicine, Niigata University Graduate School of Medicine, Kanazawa Medical University, University of Occupational and Environmental Health, Kawasaki Medical University, Hiramatsu Obstetrics and Gynecology Clinic, Saito Women Clinic, Ladies Clinic Yagi and Maki Ladies Clinic. They diagnosed each image as belonging to one of the four categories. Next, they re-diagnosed every image after the AI diagnosis was revealed to them. Changes in the human diagnoses and the accuracy of the AI-assisted image diagnosis were assessed.</p>
</sec>
<sec>
<title>Statistical analysis</title>
<p>Using MedCalc (<ext-link ext-link-type="uri" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="https://www.medcalc.org">https://www.medcalc.org</ext-link>), differences between groups were evaluated by the &#x03C7;<sup>2</sup> test and logistic regression for categorical variables. P&#x003C;0.05 was considered to indicate a statistically significant difference.</p>
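<p>As a worked example of the group comparison, the sketch below reproduces the invasive-cancer contrast of Table IV with a &#x03C7;<sup>2</sup> test; the use of scipy here is an assumption for illustration, as the published analysis was run in MedCalc.</p>
<preformat>
# Chi-squared test sketch using the invasive-cancer counts from Table IV.
from scipy.stats import chi2_contingency

initial_correct, assisted_correct, total = 311, 388, 800
table = [[initial_correct, total - initial_correct],     # before AI assistance
         [assisted_correct, total - assisted_correct]]   # after AI assistance
chi2, p, dof, expected = chi2_contingency(table)
print(f"chi2={chi2:.2f}, P={p:.5f}")  # P well below 0.01, as in Table IV
</preformat>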
</sec>
</sec>
<sec sec-type="Results">
<title>Results</title>
<sec>
<title>Performance of AI diagnosis of pathological lesions</title>
<p>The average accuracy of diagnosis for pathological lesions solely by AI was 43.5&#x0025;. For the four categories, the accuracy was 57.8&#x0025; for normal, 35.4&#x0025; for CIN1, 40.5&#x0025; for CIN2-3 and 44.2&#x0025; for invasive cancer (<xref rid="tII-MCO-16-2-02460" ref-type="table">Table II</xref>).</p>
<p>To improve the accuracy, the number of images in the training dataset was varied. <xref rid="tIII-MCO-16-2-02460" ref-type="table">Table III</xref> shows the accuracy for each category. In Group 1, 25&#x0025; of the images were used for the training dataset and the accuracy was 48.2&#x0025; for normal, 19.9&#x0025; for CIN1, 30.4&#x0025; for CIN2-3 and 54.1&#x0025; for invasive cancer. In Group 2, 50&#x0025; of the images were used for the training dataset, 25&#x0025; more than for Group 1; the Group 2 accuracy was 47.7&#x0025; for normal, 29.7&#x0025; for CIN1, 29.5&#x0025; for CIN2-3 and 52.5&#x0025; for invasive cancer. In Group 3, 75&#x0025; of the images were used for the training dataset; the accuracy was 53.6&#x0025; for normal, 30.1&#x0025; for CIN1, 42.9&#x0025; for CIN2-3 and 46.3&#x0025; for invasive cancer. In Group 4, 100&#x0025; of the images were used for the training dataset and the accuracy improved to 57.8&#x0025; for normal, 35.4&#x0025; for CIN1, 40.5&#x0025; for CIN2-3 and 44.2&#x0025; for invasive cancer. Increasing the number of images in the training dataset beyond 25&#x0025; did not lead to a significant improvement in the accuracy for the four categories (<xref rid="tIII-MCO-16-2-02460" ref-type="table">Table III</xref>).</p>
<p>Next, it was investigated whether increasing the number of images derived from each original image could improve the accuracy of image diagnosis, as is standard practice in computer science. Tripling the training dataset by adding rotated and blurred images and quadrupling it by changing the HSV were both tested. However, none of these efforts improved upon the accuracy obtained with the single original images.</p>
</sec>
<sec>
<title>AI-assisted image diagnosis</title>
<p>The accuracy of the human diagnosis of cervical pathological images by gynecologists before knowing the diagnosis from AI was 64.8&#x0025; for normal, 54.4&#x0025; for CIN1, 54.4&#x0025; for CIN2-3 and 38.9&#x0025; for invasive cancer. Once they became aware of the AI diagnosis, the human diagnosis accuracy was 63.3&#x0025; for normal, 51.1&#x0025; for CIN1, 58.0&#x0025; for CIN2-3 and 48.5&#x0025; for invasive cancer (<xref rid="tIV-MCO-16-2-02460" ref-type="table">Table IV</xref>).</p>
</sec>
</sec>
<sec sec-type="Discussion">
<title>Discussion</title>
<p>AI is being applied across various disciplines, including speech recognition, image recognition, face recognition and automated driving technology. Similarly, AI applications are expected to evolve rapidly in many medical fields (<xref rid="b5-MCO-16-2-02460" ref-type="bibr">5</xref>). The medical sector is heavily burdened with many challenges to overcome: a scarcity of medical professionals, regional disparities in medical care, disparities between treatment departments, crushing labor hours, and lapses in the safety and stability of the medical delivery system. AI can potentially reduce or resolve many of these problems.</p>
<p>Incorporating AI into medical practices is expected to improve the medical environment across Japan. Patients could receive safer and more adequate medical services, the overload of medical professionals could be reduced and new methods of diagnosis and treatment could be developed.</p>
<p>The Japanese Ministry of Health, Labour and Welfare has selected six important areas for AI development (<xref rid="b6-MCO-16-2-02460" ref-type="bibr">6</xref>). These encompass genomic medicine, diagnostic imaging, assistance with diagnosis and treatment, drug development, caregiving for dementia and surgical assistance. Among these areas, diagnostic imaging is regarded as the most practical for rapid AI adoption, as most medical images are already digitized and the application of established AI-associated technology would be easier.</p>
<p>The number of AI image recognition software packages has increased dramatically in recent years and there have already been reports of AI automated diagnosis (<xref rid="tV-MCO-16-2-02460" ref-type="table">Table V</xref>).</p>
<p>As pioneers in this field, Hu <italic>et al</italic> (<xref rid="b7-MCO-16-2-02460" ref-type="bibr">7</xref>) report that automated visual evaluation of cervigrams can identify precancer/cancer cases with great accuracy &#x005B;area under the curve (AUC)=0.91&#x005D;. Xue <italic>et al</italic> (<xref rid="b8-MCO-16-2-02460" ref-type="bibr">8</xref>) report the potential of AI to address the colposcopy bottleneck by assisting colposcopists in colposcopy image diagnosis, the detection of underlying CIN and the guidance of biopsy sites.</p>
<p>Yuan <italic>et al</italic> (<xref rid="b9-MCO-16-2-02460" ref-type="bibr">9</xref>) report that the sensitivity, specificity and accuracy of the classification model to differentiate negative cases from positive cases were 85.38, 82.62 and 84.10&#x0025;, respectively, with an AUC of 0.93. The recall and S&#x00F8;rensen-Dice coefficient of the segmentation model to segment suspicious lesions in acetic acid images were 84.73 and 61.64&#x0025;, with an average accuracy of 95.59&#x0025;. Furthermore, 84.67&#x0025; of high-grade lesions were detected by the acetic detection model. Compared to colposcopists, the diagnostic system showed improved performance for ordinary colposcopy images but was slightly unsatisfactory for high-definition images.</p>
<p>Furthermore, Xue <italic>et al</italic> (<xref rid="b10-MCO-16-2-02460" ref-type="bibr">10</xref>) report that automated visual evaluation by smartphones can be a useful adjunct to health-worker visual assessment with acetic acid, a cervical cancer screening method commonly used in low- and middle-resource settings. Miyagi <italic>et al</italic> (<xref rid="b11-MCO-16-2-02460" ref-type="bibr">11</xref>) report the feasibility of using deep learning to classify cervical squamous epithelial lesions (SILs) from colposcopy images combined with human papillomavirus (HPV) types. The sensitivity, specificity, positive predictive value, negative predictive value and the AUC &#x00B1; standard error for AI colposcopy combined with HPV types and pathological results were 0.956 (43/45), 0.833 (5/6), 0.977 (43/44), 0.714 (5/7) and 0.963&#x00B1;0.026, respectively.</p>
<p>Tan <italic>et al</italic> (<xref rid="b12-MCO-16-2-02460" ref-type="bibr">12</xref>) report that computer-based deep learning methods can achieve high-accuracy, fast cancer screening using thin-prep cytological test images. This system could classify the images and generate a test report in &#x007E;3 min with high performance (the sensitivity and specificity were 99.4 and 34.8&#x0025;, respectively, with an AUC of 0.67).</p>
<p>In all of these reports, cervical pathology was divided into two or three categories: atypical squamous cells of undetermined significance, low-grade SIL (LSIL; normal and CIN1) and high-grade SIL (HSIL; CIN2 and over). The present study is the first (to the best of the authors&#x0027; knowledge) to report the evaluation of AI image diagnosis using four categories. The average accuracy was 43.5&#x0025;; in particular, it was 40.5&#x0025; for CIN2-3 and 44.2&#x0025; for invasive cancer, which is lower than the accuracy reported for two- or three-category classification. To further improve the AI accuracy, the number of training dataset images was increased by various methods, but this was unsuccessful.</p>
<p>In the future, diagnosis using images captured by the Smartscope will be evaluated. It is hypothesized that the AI accuracy might be improved with better context and timing of image acquisition.</p>
<p>The present study reported, for the first time to the best of the authors&#x0027; knowledge, on the integration of AI and human image diagnosis for uterine cervical pathological lesions. It was evident that AI-assisted image diagnosis could significantly improve the gynecologists&#x0027; accuracy for diagnosing the category of invasive cervical cancer, and it tended to improve their diagnostic accuracy for CIN2-3, but not for CIN1 and normal.</p>
<p>When comparing the initial accuracy of AI and human diagnoses, the accuracy of humans was higher for normal and CIN1 (64.8 and 54.4&#x0025;, respectively), while AI-assisted accuracy was higher for CIN2-3 and invasive cancer (58.0 and 48.5&#x0025;, respectively).</p>
<p>For mammography screening, AI advances could be used to increase screening accuracy by reducing missed cancers and false positives. Salim <italic>et al</italic> (<xref rid="b13-MCO-16-2-02460" ref-type="bibr">13</xref>) evaluated AI computer-aided detection algorithms as independent mammography readers and assessed their screening performance when combined with radiologists. The results indicate that AI computer-aided detection algorithms can assess screening mammograms with a diagnostic performance sufficient for further evaluation as independent readers in prospective clinical trials.</p>
<p>Schaffter <italic>et al</italic> (<xref rid="b14-MCO-16-2-02460" ref-type="bibr">14</xref>) evaluated whether AI could overcome the limitations of human mammography interpretation; &#x003E;1,100 participants, comprising 126 teams from 44 countries, took part. The top-performing algorithms achieved an AUC of 0.858 (United States) and 0.903 (Sweden), and a specificity of 66.2&#x0025; (United States) and 81.2&#x0025; (Sweden) at the radiologists&#x0027; sensitivity, which was lower than the community-practice radiologists&#x0027; specificity of 90.5&#x0025; (United States) and 98.5&#x0025; (Sweden). Combining the top-performing algorithms with US radiologist assessments resulted in a higher AUC of 0.942 and achieved a significantly improved specificity (92.0&#x0025;) at the same sensitivity.</p>
<p>Humans are still responsible for any AI-assisted diagnosis in Japan. At present, it need not be argued &#x2018;Which is better, human or AI?&#x2019; or &#x2018;Will humans be dumped into the dustbin of medical history?&#x2019; Instead, we are looking toward a way to realize the powerful potential of human and AI cooperation in medicine.</p>
<p>The present study has a limitation. The accurate diagnosis rate of the AI-based diagnosis was in the 40&#x0025; range, which is not sufficient for clinical practice. This could be attributed to the evaluation of AI image diagnosis in four categories in the current study. In previous reports (<xref rid="b7-MCO-16-2-02460" ref-type="bibr">7</xref>,<xref rid="b9-MCO-16-2-02460 b10-MCO-16-2-02460 b11-MCO-16-2-02460 b12-MCO-16-2-02460" ref-type="bibr">9-12</xref>), the cervical pathology was divided into two or three categories. In the present study, the diagnostic accuracy when divided into two categories was 79.4&#x0025; for HSIL and 87.0&#x0025; for LSIL, comparable to other reports (<xref rid="b7-MCO-16-2-02460" ref-type="bibr">7</xref>,<xref rid="b9-MCO-16-2-02460 b10-MCO-16-2-02460 b11-MCO-16-2-02460 b12-MCO-16-2-02460" ref-type="bibr">9-12</xref>). In some reports, the accuracy for detecting HSIL by colposcopy was &#x007E;80-90&#x0025;, the sensitivity was &#x007E;80&#x0025; and the specificity was &#x007E;70&#x0025; (<xref rid="b15-MCO-16-2-02460" ref-type="bibr">15</xref>,<xref rid="b16-MCO-16-2-02460" ref-type="bibr">16</xref>). This level is considered necessary for clinical utility, which may represent a limit of colposcopic diagnosis.</p>
<p>In conclusion, for the four-category diagnosis of cervical cancer pathology, the accuracy of AI image diagnosis was 57.8&#x0025; for normal, 35.4&#x0025; for CIN1, 40.5&#x0025; for CIN2-3 and 44.2&#x0025; for invasive cancer. AI-assisted image diagnosis significantly improved the diagnostic accuracy of the gynecologists for invasive cancer and tended to slightly improve their accuracy for CIN2-3, but it did not improve their accuracy for the categories of CIN1 and the normal cervix.</p>
</sec>
</body>
<back>
<ack>
<title>Acknowledgements</title>
<p>The authors would like to thank Dr GS Buzard (Department of Obstetrics and Gynecology, Osaka University Graduate School of Medicine) for his constructive criticism and editing of our manuscript.</p>
</ack>
<sec sec-type="data-availability">
<title>Availability of data and materials</title>
<p>The datasets used and/or analyzed during the current study are available from the corresponding author on reasonable request.</p>
</sec>
<sec>
<title>Authors&#x0027; contributions</title>
<p>YI designed the study and interpreted the results. AM wrote the manuscript, designed the study and interpreted the results. YU designed the study and interpreted the results. YT, RN, AM, MSh, TE, MSe, TS, KY, HH, TN, TM, KH, JS, JY, YT and TK performed sample preparation. AM and YI confirm the authenticity of all the raw data. All authors reviewed and approved the final manuscript.</p>
</sec>
<sec>
<title>Ethics approval and consent to participate</title>
<p>The present study was approved by the Institutional Review Board and the Ethics Committee of the Osaka University Hospital &#x005B;approval no. 17257(T7)-8&#x005D;. The researchers obtained informed consent from the participants of the anonymous questionnaire survey. The present study included only those who consented to participate.</p>
</sec>
<sec>
<title>Patient consent for publication</title>
<p>Not applicable.</p>
</sec>
<sec sec-type="COI-statement">
<title>Competing interests</title>
<p>The present study was a cooperative research project with Kyocera Corporation.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="b1-MCO-16-2-02460"><label>1</label><element-citation publication-type="journal"><comment>WHO Disease and Injury Country Estimates. Available from: <ext-link ext-link-type="uri" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="https://www.who.int/healthinfo/global_burden_disease/estimates_country/en/">https://www.who.int/healthinfo/global_burden_disease/estimates_country/en/</ext-link>.</comment></element-citation></ref>
<ref id="b2-MCO-16-2-02460"><label>2</label><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hull</surname><given-names>R</given-names></name><name><surname>Mbele</surname><given-names>M</given-names></name><name><surname>Makhafola</surname><given-names>T</given-names></name><name><surname>Hicks</surname><given-names>C</given-names></name><name><surname>Wang</surname><given-names>SM</given-names></name><name><surname>Reis</surname><given-names>RM</given-names></name><name><surname>Mehrotra</surname><given-names>R</given-names></name><name><surname>Mkhize-Kwitshana</surname><given-names>Z</given-names></name><name><surname>Kibiki</surname><given-names>G</given-names></name><name><surname>Bates</surname><given-names>DO</given-names></name><name><surname>Dlamini</surname><given-names>Z</given-names></name></person-group><article-title>Cervical cancer in low and middle-income countries</article-title><source>Oncol Lett</source><volume>20</volume><fpage>2058</fpage><lpage>2074</lpage><year>2020</year><pub-id pub-id-type="pmid">32782524</pub-id><pub-id pub-id-type="doi">10.3892/ol.2020.11754</pub-id></element-citation></ref>
<ref id="b3-MCO-16-2-02460"><label>3</label><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tanaka</surname><given-names>Y</given-names></name><name><surname>Ueda</surname><given-names>Y</given-names></name><name><surname>Kakubari</surname><given-names>R</given-names></name><name><surname>Kakuda</surname><given-names>M</given-names></name><name><surname>Kubota</surname><given-names>S</given-names></name><name><surname>Matsuzaki</surname><given-names>S</given-names></name><name><surname>Okazawa</surname><given-names>A</given-names></name><name><surname>Egawa-Takata</surname><given-names>T</given-names></name><name><surname>Matsuzaki</surname><given-names>S</given-names></name><name><surname>Kobayashi</surname><given-names>E</given-names></name><name><surname>Kimura</surname><given-names>T</given-names></name></person-group><article-title>Histologic correlation between smartphone and coloposcopic findings in patients with abnormal cervical cytology: Experiences in a tertiary referral hospital</article-title><source>Am J Obstet Gynecol</source><volume>221</volume><fpage>241.e1</fpage><lpage>241.e6</lpage><year>2019</year><pub-id pub-id-type="pmid">31075244</pub-id><pub-id pub-id-type="doi">10.1016/j.ajog.2019.04.039</pub-id></element-citation></ref>
<ref id="b4-MCO-16-2-02460"><label>4</label><element-citation publication-type="journal"><comment><ext-link ext-link-type="uri" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="https://towardsdatascience.com/a-simple-guide-to-the-versions-of-the-inception-network-7fc52b863202">https://towardsdatascience.com/a-simple-guide-to-the-versions-of-the-inception-network-7fc52b863202</ext-link>.</comment></element-citation></ref>
<ref id="b5-MCO-16-2-02460"><label>5</label><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jiang</surname><given-names>F</given-names></name><name><surname>Jiang</surname><given-names>Y</given-names></name><name><surname>Zhi</surname><given-names>H</given-names></name><name><surname>Dong</surname><given-names>Y</given-names></name><name><surname>Li</surname><given-names>H</given-names></name><name><surname>Ma</surname><given-names>S</given-names></name><name><surname>Wang</surname><given-names>Y</given-names></name><name><surname>Dong</surname><given-names>Q</given-names></name><name><surname>Shen</surname><given-names>H</given-names></name><name><surname>Wang</surname><given-names>Y</given-names></name></person-group><article-title>Artificial intelligence in healthecare: Past, present and future</article-title><source>Strole Vasc Neurol</source><volume>2</volume><fpage>230</fpage><lpage>243</lpage><year>2017</year><pub-id pub-id-type="pmid">29507784</pub-id><pub-id pub-id-type="doi">10.1136/svn-2017-000101</pub-id></element-citation></ref>
<ref id="b6-MCO-16-2-02460"><label>6</label><element-citation publication-type="journal"><comment>WHO: The Global Health Observatory. Explore a world of health data. Available from: <ext-link ext-link-type="uri" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="https://www.mhlw.go.jp/content/10601000/000568486.pdf">https://www.mhlw.go.jp/content/10601000/000568486.pdf</ext-link>.</comment></element-citation></ref>
<ref id="b7-MCO-16-2-02460"><label>7</label><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hu</surname><given-names>L</given-names></name><name><surname>Bell</surname><given-names>D</given-names></name><name><surname>Antani</surname><given-names>S</given-names></name><name><surname>Xue</surname><given-names>Z</given-names></name><name><surname>Yu</surname><given-names>K</given-names></name><name><surname>Horning</surname><given-names>MP</given-names></name><name><surname>Gachuhi</surname><given-names>N</given-names></name><name><surname>Wilson</surname><given-names>B</given-names></name><name><surname>Jaiswal</surname><given-names>MS</given-names></name><name><surname>Befano</surname><given-names>B</given-names></name><etal/></person-group><article-title>An observational study of deep learning and automated evaluation of cervical images for cancer screening</article-title><source>J Natl Cancer Inst</source><volume>111</volume><fpage>923</fpage><lpage>932</lpage><year>2019</year><pub-id pub-id-type="pmid">30629194</pub-id><pub-id pub-id-type="doi">10.1093/jnci/djy225</pub-id></element-citation></ref>
<ref id="b8-MCO-16-2-02460"><label>8</label><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Xue</surname><given-names>P</given-names></name><name><surname>Ng</surname><given-names>MT</given-names></name><name><surname>Qiao</surname><given-names>Y</given-names></name></person-group><article-title>The challenges of colposcopy for cervical cancer screening in LMICs and solutions by artificial intelligence</article-title><source>BMC Med</source><volume>18</volume><issue>169</issue><year>2020</year><pub-id pub-id-type="pmid">32493320</pub-id><pub-id pub-id-type="doi">10.1186/s12916-020-01613-x</pub-id></element-citation></ref>
<ref id="b9-MCO-16-2-02460"><label>9</label><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yuan</surname><given-names>C</given-names></name><name><surname>Yao</surname><given-names>Y</given-names></name><name><surname>Cheng</surname><given-names>B</given-names></name><name><surname>Cheng</surname><given-names>Y</given-names></name><name><surname>Li</surname><given-names>Y</given-names></name><name><surname>Li</surname><given-names>Y</given-names></name><name><surname>Liu</surname><given-names>X</given-names></name><name><surname>Cheng</surname><given-names>X</given-names></name><name><surname>Xie</surname><given-names>X</given-names></name><name><surname>Wu</surname><given-names>J</given-names></name><etal/></person-group><article-title>The application of deep learning based diagnostic system to cervical squamous intraepithelial lesions recognition in colposcopy images</article-title><source>Sci Rep</source><volume>10</volume><issue>11639</issue><year>2020</year><pub-id pub-id-type="pmid">32669565</pub-id><pub-id pub-id-type="doi">10.1038/s41598-020-68252-3</pub-id></element-citation></ref>
<ref id="b10-MCO-16-2-02460"><label>10</label><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Xue</surname><given-names>Z</given-names></name><name><surname>Novetsky</surname><given-names>AP</given-names></name><name><surname>Einstein</surname><given-names>MH</given-names></name><name><surname>Marcus</surname><given-names>JZ</given-names></name><name><surname>Befano</surname><given-names>B</given-names></name><name><surname>Guo</surname><given-names>P</given-names></name><name><surname>Demarco</surname><given-names>M</given-names></name><name><surname>Wentzensen</surname><given-names>N</given-names></name><name><surname>Long</surname><given-names>LR</given-names></name><name><surname>Schiffman</surname><given-names>M</given-names></name><name><surname>Antani</surname><given-names>S</given-names></name></person-group><article-title>A demonstration of automated visual evaluation of cervical images taken with a smartphone camera</article-title><source>Int J Cancer</source><volume>147</volume><fpage>2416</fpage><lpage>2423</lpage><year>2020</year><pub-id pub-id-type="pmid">32356305</pub-id><pub-id pub-id-type="doi">10.1002/ijc.33029</pub-id></element-citation></ref>
<ref id="b11-MCO-16-2-02460"><label>11</label><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Miyagi</surname><given-names>Y</given-names></name><name><surname>Takehara</surname><given-names>K</given-names></name><name><surname>Nagayasu</surname><given-names>Y</given-names></name><name><surname>Miyake</surname><given-names>T</given-names></name></person-group><article-title>Application of deep learning to the classification of uterine cervical squamous epithelial lesion from colposcopy images combined with HPV types</article-title><source>Oncol Lett</source><volume>19</volume><fpage>1602</fpage><lpage>1610</lpage><year>2020</year><pub-id pub-id-type="pmid">31966086</pub-id><pub-id pub-id-type="doi">10.3892/ol.2019.11214</pub-id></element-citation></ref>
<ref id="b12-MCO-16-2-02460"><label>12</label><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tan</surname><given-names>X</given-names></name><name><surname>Li</surname><given-names>K</given-names></name><name><surname>Zhang</surname><given-names>J</given-names></name><name><surname>Wang</surname><given-names>W</given-names></name><name><surname>Wu</surname><given-names>B</given-names></name><name><surname>Wu</surname><given-names>J</given-names></name><name><surname>Li</surname><given-names>X</given-names></name><name><surname>Huang</surname><given-names>X</given-names></name></person-group><article-title>Automatic model for cervical cancer screening based on convolutional neural network: A retrospective, multicohort, multicenter study</article-title><source>Cancer Cell Int</source><volume>21</volume><issue>35</issue><year>2021</year><pub-id pub-id-type="pmid">33413391</pub-id><pub-id pub-id-type="doi">10.1186/s12935-020-01742-6</pub-id></element-citation></ref>
<ref id="b13-MCO-16-2-02460"><label>13</label><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Salim</surname><given-names>M</given-names></name><name><surname>W&#x00E5;hlin</surname><given-names>E</given-names></name><name><surname>Dembrower</surname><given-names>K</given-names></name><name><surname>Azavedo</surname><given-names>E</given-names></name><name><surname>Foukakis</surname><given-names>T</given-names></name><name><surname>Liu</surname><given-names>Y</given-names></name><name><surname>Smith</surname><given-names>K</given-names></name><name><surname>Eklund</surname><given-names>M</given-names></name><name><surname>Strand</surname><given-names>F</given-names></name></person-group><article-title>External evaluation of 3 commercial artificial intelligence algorithms for independent assessment of screening mammograms</article-title><source>JAMA Oncol</source><volume>6</volume><fpage>1581</fpage><lpage>1588</lpage><year>2020</year><pub-id pub-id-type="pmid">32852536</pub-id><pub-id pub-id-type="doi">10.1001/jamaoncol.2020.3321</pub-id></element-citation></ref>
<ref id="b14-MCO-16-2-02460"><label>14</label><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schaffter</surname><given-names>T</given-names></name><name><surname>Buist</surname><given-names>DSM</given-names></name><name><surname>Lee</surname><given-names>CI</given-names></name><name><surname>Nikulin</surname><given-names>Y</given-names></name><name><surname>Ribli</surname><given-names>D</given-names></name><name><surname>Guan</surname><given-names>Y</given-names></name><name><surname>Lotter</surname><given-names>W</given-names></name><name><surname>Jie</surname><given-names>Z</given-names></name><name><surname>Du</surname><given-names>H</given-names></name><name><surname>Wang</surname><given-names>S</given-names></name><etal/></person-group><article-title>Evaluation of combined artificial intelligence and radiologist assessment to interpret screening mammograms</article-title><source>JAMA Netw Open</source><volume>3</volume><issue>e200265</issue><year>2020</year><pub-id pub-id-type="pmid">32119094</pub-id><pub-id pub-id-type="doi">10.1001/jamanetworkopen.2020.0265</pub-id></element-citation></ref>
<ref id="b15-MCO-16-2-02460"><label>15</label><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Stuebs</surname><given-names>FA</given-names></name><name><surname>Schulmeyer</surname><given-names>CE</given-names></name><name><surname>Mehlhorn</surname><given-names>G</given-names></name><name><surname>Gass</surname><given-names>P</given-names></name><name><surname>Kehl</surname><given-names>S</given-names></name><name><surname>Renner</surname><given-names>SK</given-names></name><name><surname>Renner</surname><given-names>SP</given-names></name><name><surname>Geppert</surname><given-names>C</given-names></name><name><surname>Adler</surname><given-names>W</given-names></name><name><surname>Hartmann</surname><given-names>A</given-names></name><etal/></person-group><article-title>Accuracy of colposcopy-directed biopsy in detecting early cervical neoplasia: A retrospective study</article-title><source>Arch Gynecol Obstet</source><volume>299</volume><fpage>525</fpage><lpage>532</lpage><year>2019</year><pub-id pub-id-type="pmid">30367250</pub-id><pub-id pub-id-type="doi">10.1007/s00404-018-4953-8</pub-id></element-citation></ref>
<ref id="b16-MCO-16-2-02460"><label>16</label><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fatahi</surname><given-names>MN</given-names></name><name><surname>Meybodi</surname><given-names>NF</given-names></name><name><surname>Karimi-Zarchi</surname><given-names>M</given-names></name><name><surname>Allahqoli</surname><given-names>L</given-names></name><name><surname>Sekhavat</surname><given-names>L</given-names></name><name><surname>Gitas</surname><given-names>G</given-names></name><name><surname>Rahmani</surname><given-names>A</given-names></name><name><surname>Fallahi</surname><given-names>A</given-names></name><name><surname>Hassanlouei</surname><given-names>B</given-names></name><name><surname>Alkatout</surname><given-names>I</given-names></name></person-group><article-title>Accuracy if triple test versus colposcopy for the diagnosis of premalignant and malignant cervical lesions</article-title><source>Asian Pac J Cancer Prev</source><volume>21</volume><fpage>3501</fpage><lpage>3507</lpage><year>2020</year><pub-id pub-id-type="pmid">33369445</pub-id><pub-id pub-id-type="doi">10.31557/APJCP.2020.21.12.3501</pub-id></element-citation></ref>
</ref-list>
</back>
<floats-group>
<fig id="f1-MCO-16-2-02460" position="float">
<label>Figure 1</label>
<caption><p>Diagram of AI-assisted image diagnosis for cervical lesions. In the traditional biopsy routine for cervical cancer diagnosis, gynecologists manually observe the uterine cervix with a colposcope and decide where to obtain a tissue sample for more detailed microscopic examination. However, colposcopes are large and expensive, and gynecologists require a great deal of practical experience to decide correctly which part of the cervix to sample. Smartscopy is a cheap and simple improvement. This AI system can guide the selection of the best biopsy sites by doctors not yet well-practiced in such decisions. It could be expected to help reduce the burden on gynecologists and to expand to medical facilities in developing countries. AI, artificial intelligence.</p></caption>
<graphic xlink:href="mco-16-02-02460-g00.tif" />
</fig>
<fig id="f2-MCO-16-2-02460" position="float">
<label>Figure 2</label>
<caption><p>Example of an annotated image. The left image is of a cervical pathological lesion processed with acetic acid prior to biopsy. The right image is annotated by a gynecologic oncologist, who specified the pathological lesion.</p></caption>
<graphic xlink:href="mco-16-02-02460-g01.tif" />
</fig>
<table-wrap id="tI-MCO-16-2-02460" position="float">
<label>Table I</label>
<caption><p>The distribution of images.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="middle">&#x00A0;</th>
<th align="center" valign="middle" colspan="2">Normal</th>
<th align="center" valign="middle" colspan="2">CIN1</th>
<th align="center" valign="middle" colspan="2">CIN2-3</th>
<th align="center" valign="middle" colspan="2">Invasive cancer</th>
<th align="center" valign="middle" colspan="2">Total</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Images</td>
<td align="center" valign="middle" colspan="2">120</td>
<td align="center" valign="middle" colspan="2">120</td>
<td align="center" valign="middle" colspan="2">113</td>
<td align="center" valign="middle" colspan="2">110</td>
<td align="center" valign="middle" colspan="2">463</td>
</tr>
<tr>
<td align="left" valign="middle">&#x00A0;</td>
<td align="center" valign="middle">Training</td>
<td align="center" valign="middle">Test</td>
<td align="center" valign="middle">Training</td>
<td align="center" valign="middle">Test</td>
<td align="center" valign="middle">Training</td>
<td align="center" valign="middle">Test</td>
<td align="center" valign="middle">Training</td>
<td align="center" valign="middle">Test</td>
<td align="center" valign="middle">Training</td>
<td align="center" valign="middle">Test</td>
</tr>
<tr>
<td align="left" valign="middle">&#x00A0;</td>
<td align="center" valign="middle">90</td>
<td align="center" valign="middle">30</td>
<td align="center" valign="middle">90</td>
<td align="center" valign="middle">30</td>
<td align="center" valign="middle">85</td>
<td align="center" valign="middle">28</td>
<td align="center" valign="middle">83</td>
<td align="center" valign="middle">27</td>
<td align="center" valign="middle">348</td>
<td align="center" valign="middle">115</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn><p>CIN, cervical intraepithelial neoplasia.</p></fn>
</table-wrap-foot>
</table-wrap>
<table-wrap id="tII-MCO-16-2-02460" position="float">
<label>Table II</label>
<caption><p>The accuracy of AI image diagnosis.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="middle">&#x00A0;</th>
<th align="center" valign="middle">Accuracy (&#x0025;)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Normal</td>
<td align="center" valign="middle">57.8</td>
</tr>
<tr>
<td align="left" valign="middle">CIN1</td>
<td align="center" valign="middle">35.4</td>
</tr>
<tr>
<td align="left" valign="middle">CIN2-3</td>
<td align="center" valign="middle">40.5</td>
</tr>
<tr>
<td align="left" valign="middle">Invasive cancer</td>
<td align="center" valign="middle">44.2</td>
</tr>
<tr>
<td align="left" valign="middle">Total</td>
<td align="center" valign="middle">43.5</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn><p>AI, artificial intelligence; CIN, cervical intraepithelial neoplasia.</p></fn>
</table-wrap-foot>
</table-wrap>
<table-wrap id="tIII-MCO-16-2-02460" position="float">
<label>Table III</label>
<caption><p>Accuracy of AI image diagnosis of each group.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="middle">&#x00A0;</th>
<th align="center" valign="middle">Group 1 25&#x0025; of training case (&#x0025;)</th>
<th align="center" valign="middle">Group 2 50&#x0025; of training case (&#x0025;)</th>
<th align="center" valign="middle">Group 3 75&#x0025; of training case(&#x0025;)</th>
<th align="center" valign="middle">Group 4 100&#x0025; of training case (&#x0025;)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Normal</td>
<td align="center" valign="middle">48.2</td>
<td align="center" valign="middle">47.7</td>
<td align="center" valign="middle">53.6</td>
<td align="center" valign="middle">57.8</td>
</tr>
<tr>
<td align="left" valign="middle">CIN1</td>
<td align="center" valign="middle">19.9</td>
<td align="center" valign="middle">29.7</td>
<td align="center" valign="middle">30.1</td>
<td align="center" valign="middle">35.4</td>
</tr>
<tr>
<td align="left" valign="middle">CIN2-3</td>
<td align="center" valign="middle">30.4</td>
<td align="center" valign="middle">29.5</td>
<td align="center" valign="middle">42.9</td>
<td align="center" valign="middle">40.5</td>
</tr>
<tr>
<td align="left" valign="middle">Invasive cancer</td>
<td align="center" valign="middle">54.1</td>
<td align="center" valign="middle">52.5</td>
<td align="center" valign="middle">46.3</td>
<td align="center" valign="middle">44.2</td>
</tr>
<tr>
<td align="left" valign="middle">Total</td>
<td align="center" valign="middle">36.4</td>
<td align="center" valign="middle">37.9</td>
<td align="center" valign="middle">42.1</td>
<td align="center" valign="middle">43.5</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn><p>AI, artificial intelligence; CIN, cervical intraepithelial neoplasia.</p></fn>
</table-wrap-foot>
</table-wrap>
<table-wrap id="tIV-MCO-16-2-02460" position="float">
<label>Table IV</label>
<caption><p>Significance of AI-assisted image diagnosis.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="middle">Lesions</th>
<th align="center" valign="middle">Initial</th>
<th align="center" valign="middle">AI-assisted</th>
<th align="center" valign="middle">P-value</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Normal</td>
<td align="center" valign="middle">518/800 (64.8&#x0025;)</td>
<td align="center" valign="middle">506/800 (63.3&#x0025;)</td>
<td align="center" valign="middle">0.57</td>
</tr>
<tr>
<td align="left" valign="middle">CIN 1</td>
<td align="center" valign="middle">435/800 (54.4&#x0025;)</td>
<td align="center" valign="middle">409/800 (51.1&#x0025;)</td>
<td align="center" valign="middle">0.21</td>
</tr>
<tr>
<td align="left" valign="middle">CIN 2-3</td>
<td align="center" valign="middle">435/800 (54.4&#x0025;)</td>
<td align="center" valign="middle">464/800 (58.0&#x0025;)</td>
<td align="center" valign="middle">0.14</td>
</tr>
<tr>
<td align="left" valign="middle">Invasive cancer</td>
<td align="center" valign="middle">311/800 (38.9&#x0025;)</td>
<td align="center" valign="middle">388/800 (48.5&#x0025;)</td>
<td align="center" valign="middle">&#x003C;0.01</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn><p>AI, artificial intelligence; CIN, cervical intraepithelial neoplasia.</p></fn>
</table-wrap-foot>
</table-wrap>
<table-wrap id="tV-MCO-16-2-02460" position="float">
<label>Table V</label>
<caption><p>Summary of AI reports.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="middle">Author (year)</th>
<th align="center" valign="middle">Subject</th>
<th align="center" valign="middle">(Refs.)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Hu <italic>et al</italic> (2019)</td>
<td align="left" valign="middle">Pioneer of automated visual evaluation of cervigrams</td>
<td align="center" valign="middle">(<xref rid="b7-MCO-16-2-02460" ref-type="bibr">7</xref>)</td>
</tr>
<tr>
<td align="left" valign="middle">Xue <italic>et al</italic> (2020)</td>
<td align="left" valign="middle">AI assistance in colposcopy imaging judgment</td>
<td align="center" valign="middle">(<xref rid="b8-MCO-16-2-02460" ref-type="bibr">8</xref>)</td>
</tr>
<tr>
<td align="left" valign="middle">Yuan <italic>et al</italic> (2020)</td>
<td align="left" valign="middle">High performance of AI diagnostic system</td>
<td align="center" valign="middle">(<xref rid="b9-MCO-16-2-02460" ref-type="bibr">9</xref>)</td>
</tr>
<tr>
<td align="left" valign="middle">Xue <italic>et al</italic> (2020)</td>
<td align="left" valign="middle">Automated visual evaluation on smartphones</td>
<td align="center" valign="middle">(<xref rid="b10-MCO-16-2-02460" ref-type="bibr">10</xref>)</td>
</tr>
<tr>
<td align="left" valign="middle">Miyagi <italic>et al</italic> (2020)</td>
<td align="left" valign="middle">AI colposcopy combined with HPV types</td>
<td align="center" valign="middle">(<xref rid="b11-MCO-16-2-02460" ref-type="bibr">11</xref>)</td>
</tr>
<tr>
<td align="left" valign="middle">Tan <italic>et al</italic> (2021)</td>
<td align="left" valign="middle">AI assistance in thin-prep cytological test images</td>
<td align="center" valign="middle">(<xref rid="b12-MCO-16-2-02460" ref-type="bibr">12</xref>)</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn><p>AI, artificial intelligence; HPV, human papillomavirus.</p></fn>
</table-wrap-foot>
</table-wrap>
</floats-group>
</article>
