2024

Hasan, Md. Rakibul; Jani, Md Rafsan; Rahman, Mahmudur
Image and Text Feature Based Multimodal Learning for Multi-Label Classification of Radiology Images in Biomedical Literature Presentation
21.02.2024.
Abstract | Links | BibTeX | Tags: Biomedical Image Annotation, CNN, DistilGPT2., Image retrieval, Multimodal Learning, ResNet50, ViT
@misc{nokey,
title = {Image and Text Feature Based Multimodal Learning for Multi-Label Classification of Radiology Images in Biomedical Literature},
author = {Md. Rakibul Hasan and Md Rafsan Jani and Mahmudur Rahman},
url = {https://www.insticc.org/node/TechnicalProgram/biostec/2024/presentationDetails/124384},
year = {2024},
date = {2024-02-21},
urldate = {2024-02-21},
abstract = {Biomedical images are crucial for diagnosing and planning treatments, as well as advancing scientific understanding of various ailments. To effectively highlight regions of interest (RoIs) and convey medical concepts, annotation markers like arrows, letters, or symbols are employed. However, annotating these images with appropriate medical labels poses a significant challenge. In this study, we propose a framework that leverages multimodal input features, including text/label features and visual features, to facilitate accurate annotation of biomedical images with multiple labels. Our approach integrates state-of-the-art models such as ResNet50 and Vision Transformers (ViT) to extract informative features from the images. Additionally, we employ Generative Pre-trained Distilled-GPT2 (Transformer based Natural Language Processing architecture) to extract textual features, leveraging their natural language understanding capabilities. This combination of image and text modalities allows for a more comprehensive representation of the biomedical data, leading to improved annotation accuracy. By combining the features extracted from both image and text modalities, we trained a simplified Convolutional Neural Network (CNN) based multi-classifier to learn the image-text relations and predict multi-labels for multi-modal radiology images. We used ImageCLEFmedical 2022 and 2023 datasets to demonstrate the effectiveness of our framework. This dataset likely contains a diverse range of biomedical images, enabling the evaluation of the framework’s performance under realistic conditions. We have achieved promising results with the F1 score of 0.508. Our proposed framework exhibits potential performance in annotating biomedical images with multiple labels, contributing to improved image understanding and analysis in the medical image processing domain.},
keywords = {Biomedical Image Annotation, CNN, DistilGPT2., Image retrieval, Multimodal Learning, ResNet50, ViT},
pubstate = {published},
tppubtype = {presentation}
}
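
A minimal sketch (not the authors' code) of the late-fusion idea described in the abstract above: pre-extracted image features (e.g., from ResNet50/ViT) and text features (e.g., from DistilGPT2) are concatenated and passed to a classifier with sigmoid outputs for multi-label prediction. The feature dimensions, layer sizes, use of PyTorch, and the plain feed-forward head (the paper describes a simplified CNN-based classifier) are illustrative assumptions.

# Hypothetical sketch of image-text feature fusion for multi-label classification.
# Dimensions (2048-d image, 768-d text) and the simple MLP head are assumptions.
import torch
import torch.nn as nn

class MultimodalMultiLabelHead(nn.Module):
    def __init__(self, img_dim=2048, txt_dim=768, num_labels=100):
        super().__init__()
        self.classifier = nn.Sequential(
            nn.Linear(img_dim + txt_dim, 512),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(512, num_labels),   # raw logits; sigmoid applied at inference
        )

    def forward(self, img_feat, txt_feat):
        fused = torch.cat([img_feat, txt_feat], dim=1)  # simple concatenation fusion
        return self.classifier(fused)

model = MultimodalMultiLabelHead()
criterion = nn.BCEWithLogitsLoss()               # multi-label training objective
img_feat = torch.randn(4, 2048)                  # placeholder ResNet50/ViT pooled features
txt_feat = torch.randn(4, 768)                   # placeholder DistilGPT2 hidden-state features
targets = torch.randint(0, 2, (4, 100)).float()  # multi-hot label vectors
loss = criterion(model(img_feat, txt_feat), targets)
probs = torch.sigmoid(model(img_feat, txt_feat)) # per-label probabilities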

Adeika, B. I.; Aina, J.; Ibirinde, T.; Adeyemi, T.; Rahman, M. M.; Pramanik, S.
Ensemble and Transformer Models for Infectious Disease Prediction Conference
2023 IEEE 23rd International Conference on Bioinformatics and Bioengineering (BIBE), Dayton, OH, USA, 2024.
Abstract | Links | BibTeX | Tags: Infectious diseases;Tuberculosis;Genomics;Predictive models;Transformers;Bioinformatics;Monitoring;BERT;XLNET;RoBERTa;Disease Prediction;Ensemble techniques;Genomic sequence
@conference{nokey,
title = {Ensemble and Transformer Models for Infectious Disease Prediction},
author = {B. I. Adeika and J. Aina and T. Ibirinde and T. Adeyemi and M. M. Rahman and S. Pramanik},
url = {https://ieeexplore.ieee.org/document/10431838/authors},
doi = {10.1109/BIBE60311.2023.00068},
year = {2024},
date = {2024-02-19},
urldate = {2024-02-19},
pages = {377-384},
publisher = {2023 IEEE 23rd International Conference on Bioinformatics and Bioengineering (BIBE), Dayton, OH, USA, 2023},
abstract = {Infectious diseases persist as an urgent global challenge, necessitating innovative strides in prediction and monitoring. This study delves into the intricate realm of infectious disease prediction, employing three transformer models—BERT, XLNET, and RoBERTa. The central objective of this research was to craft a framework for infectious disease prediction, significantly enhancing capabilities in disease monitoring, detection, and outbreak response. The approach entailed receiving a set of translated protein sequences from various infectious diseases and leveraging these sequences to predict each disease with the models. This methodology advanced infectious disease prediction and monitoring by expediting the analysis of genomic data, enabling the identification of distinctive patterns, mutations, and signatures associated with specific infectious agents. The dataset comprised genomic sequences from diseases such as Zika, Ebola, SARS-CoV-2, Influenza A, Influenza B, Tuberculosis, along with sequences from non-infected individuals. Model evaluation encompassed essential metrics, including accuracy, precision, recall, and the F1 score. In our quest for heightened precision, we also devised ensemble techniques to harness the collective power of all three models, yielding accuracies of 92% (Majority Voting) and 85% (Weighted Average). Leveraging DNA sequences translated into protein sequences, this study contributed to advancing our understanding and management of infectious diseases on a global scale.},
keywords = {Infectious diseases;Tuberculosis;Genomics;Predictive models;Transformers;Bioinformatics;Monitoring;BERT;XLNET;RoBERTa;Disease Prediction;Ensemble techniques;Genomic sequence},
pubstate = {published},
tppubtype = {conference}
}
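
An illustrative sketch of the two ensemble rules named in the abstract above (majority voting and a weighted average over the per-model class probabilities from BERT, XLNet, and RoBERTa). The example probabilities and weights are made up, not values from the paper.

# Combine per-model class-probability matrices with the two ensemble rules.
import numpy as np

# Shape (n_samples, n_classes); e.g., softmax outputs of each fine-tuned model.
bert = np.array([[0.7, 0.3], [0.2, 0.8]])
xlnet = np.array([[0.6, 0.4], [0.4, 0.6]])
roberta = np.array([[0.8, 0.2], [0.3, 0.7]])
probs = np.stack([bert, xlnet, roberta])          # (n_models, n_samples, n_classes)

# Majority voting: each model votes with its argmax class.
votes = probs.argmax(axis=2)                      # (n_models, n_samples)
majority = np.apply_along_axis(lambda v: np.bincount(v).argmax(), 0, votes)

# Weighted average: blend probabilities with per-model weights, then take argmax.
weights = np.array([0.4, 0.3, 0.3])[:, None, None]
weighted = (probs * weights).sum(axis=0).argmax(axis=1)

print(majority, weighted)
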
2023

Dixon, Jose; Rahman, Md. Mahmudur
Statistical Analysis of Imbalanced Classification with Training Size Variation and Subsampling on Datasets of Research Papers in Biomedical Literature Journal Article
In: Machine Learning and Knowledge Extraction, 2023.
Abstract | Links | BibTeX | Tags:
@article{nokey,
title = {Statistical Analysis of Imbalanced Classification with Training Size Variation and Subsampling on Datasets of Research Papers in Biomedical Literature},
author = {Jose Dixon and Md. Mahmudur Rahman},
url = {https://www.mdpi.com/2504-4990/5/4/95},
doi = {10.3390/make5040095},
year = {2023},
date = {2023-12-11},
urldate = {2023-12-11},
issuetitle = {Statistical Analysis of Imbalanced Classification with Training Size Variation and Subsampling on Datasets of Research Papers in Biomedical Literature},
journal = {Machine Learning and Knowledge Extraction},
abstract = {https://www.mdpi.com/2504-4990/5/4/95},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Emon, Md Ismail Siddiqi; Rahman, Md Mahmudur
Media Interestingness Prediction in ImageCLEFfusion 2023 with Dense Architecture-based Ensemble & Scaled Gradient Boosting Regressor Model Conference
ImageCLEF - The CLEF Cross Language Image Retrieval Track, Conference and Labs of the Evaluation Forum, 2023.
Abstract | Links | BibTeX | Tags:
@conference{nokey,
title = {Media Interestingness Prediction in ImageCLEFfusion 2023 with Dense Architecture-based Ensemble & Scaled Gradient Boosting Regressor Model},
author = {Md Ismail Siddiqi Emon and Md Mahmudur Rahman},
url = {https://www.semanticscholar.org/paper/Media-Interestingness-Prediction-in-ImageCLEFfusion-Emon-Rahman/bc0e921f0f1af7578e40259cb90cf465fbb0642d},
year = {2023},
date = {2023-08-19},
urldate = {2023-08-19},
publisher = {Conference and Labs of the Evaluation Forum},
organization = {ImageCLEF - The CLEF Cross Language Image Retrieval Track},
abstract = {The field of computer vision plays a key role in managing, processing, analyzing, and interpreting multimedia data in diverse applications. Visual interestingness in multimedia contents is crucial for many practical applications, such as search and recommendation. Determining the interestingness of a particular piece of media content and selecting the highest-value item in terms of content analysis, viewers’ perspective, content classification, and scoring media are sophisticated tasks to perform due to their heavily subjective nature. This work presents the approaches of the CS_Morgan team participating in the media interestingness prediction task under the ImageCLEFfusion 2023 benchmark evaluation. We experimented with two ensemble methods: a dense architecture and a gradient boosting scaled architecture. For the dense architecture, several hyperparameter tunings were performed, and the output scores of all the inducers after the dense layers were combined using the min-max rule. The gradient boost estimator provides an additive model in staged forward propagation, which allows an optimized loss function. For every step in the ensemble gradient boosting scaled (EGBS) architecture, a regression tree is fitted to the negative gradient of the loss function. We achieved the best accuracy with a MAP@10 score of 0.1287 by using the ensemble EGBS.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
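
A rough sketch of the two fusion ideas mentioned in the abstract above: (a) min-max normalizing inducer scores before combining them, and (b) a gradient-boosting regressor trained on the inducer scores (scikit-learn's GradientBoostingRegressor stands in for the paper's EGBS setup). The random data and hyperparameters are placeholders.

import numpy as np
from sklearn.ensemble import GradientBoostingRegressor

rng = np.random.default_rng(0)
inducer_scores = rng.random((200, 10))        # 200 media items x 10 inducer outputs
interestingness = rng.random(200)             # ground-truth interestingness scores

# (a) Min-max rule: rescale each inducer's scores to [0, 1], then average.
mins, maxs = inducer_scores.min(axis=0), inducer_scores.max(axis=0)
normalised = (inducer_scores - mins) / (maxs - mins + 1e-9)
fused_minmax = normalised.mean(axis=1)

# (b) Gradient boosting: stage-wise additive regression trees fitted to the
# negative gradient of the loss, using the inducer scores as features.
egbs = GradientBoostingRegressor(n_estimators=200, learning_rate=0.05, max_depth=3)
egbs.fit(inducer_scores, interestingness)
fused_egbs = egbs.predict(inducer_scores)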

Hasan, Md Rakibul; Layode, Oyebisi; Rahman, Md Mahmudur
Concept Detection and Caption Prediction in ImageCLEFmedical Caption 2023 with Convolutional Neural Networks, Vision and Text-to-Text Transfer Transformers Conference
ImageCLEF - The CLEF Cross Language Image Retrieval Track, Conference and Labs of the Evaluation Forum, 2023.
Abstract | Links | BibTeX | Tags:
@conference{nokey,
title = {Concept Detection and Caption Prediction in ImageCLEFmedical Caption 2023 with Convolutional Neural Networks, Vision and Text-to-Text Transfer Transformers},
author = {Md Rakibul Hasan and Oyebisi Layode and Md Mahmudur Rahman},
url = {https://www.semanticscholar.org/paper/Concept-Detection-and-Caption-Prediction-in-Caption-Hasan-Layode/9abbbe401135a09188d4428c17b75abd0c4bc6b7},
year = {2023},
date = {2023-08-17},
urldate = {2023-08-17},
publisher = {Conference and Labs of the Evaluation Forum},
organization = {ImageCLEF - The CLEF Cross Language Image Retrieval Track},
abstract = {This work discusses the participation of CS _ Morgan in the Concept Detection and Caption Prediction tasks of the ImageCLEFmedical 2023 Caption benchmark evaluation campaign. The goal of this task is to automatically identify relevant concepts and their locations in images, as well as generate coherent captions for the images. The dataset used for this task is a subset of the extended Radiology Objects in Context (ROCO) dataset. The implementation approach employed by us involved the use of pre-trained Convolutional Neural Networks (CNNs), Vision Transformer (ViT), and Text-to-Text Transfer Transformer (T5) architectures. These models were leveraged to handle the different aspects of the tasks, such as concept detection and caption generation. In the Concept Detection task, the objective was to classify multiple concepts associated with each image. We utilized several deep learning architectures with ‘sigmoid’ activation to enable multilabel classification using the Keras framework. We submitted a total of five (5) runs for this task, and the best run achieved an F1 score of 0.4834, indicating its effectiveness in detecting relevant concepts in the images. For the Caption Prediction task, we successfully submitted eight (8) runs. Our approach involved combining the ViT and T5 models to generate captions for the images. For the caption prediction task, the ranking is based on the BERTScore, and our best run achieved a score of 0.5819 based on generating captions using the fine-tuned T5 model from keywords generated using the pre-trained ViT as the encoder.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
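
A minimal Keras sketch of a multi-label concept classifier with sigmoid outputs, in the spirit of the concept-detection runs described above; the backbone choice (ResNet50), input size, and concept-vocabulary size are assumptions for illustration only.

import tensorflow as tf

num_concepts = 500                               # placeholder concept-vocabulary size
backbone = tf.keras.applications.ResNet50(
    include_top=False, pooling="avg", input_shape=(224, 224, 3))
model = tf.keras.Sequential([
    backbone,
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(num_concepts, activation="sigmoid"),  # one unit per concept
])
model.compile(optimizer="adam",
              loss="binary_crossentropy",        # treats each concept independently
              metrics=[tf.keras.metrics.AUC(multi_label=True, num_labels=num_concepts)])
# Training expects multi-hot concept vectors as targets:
# model.fit(images, multi_hot_concepts, epochs=..., batch_size=...)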

Rahman, Md Mahmudur
Artificial Intelligence (AI) and Machine Learning (ML) in Medical Imaging Informatics towards Diagnostic Decision Making Book
2023, ISBN: 978-3-0365-8128-6.
Abstract | Links | BibTeX | Tags:
@book{nokey,
title = {Artificial Intelligence (AI) and Machine Learning (ML) in Medical Imaging Informatics towards Diagnostic Decision Making},
author = {Md Mahmudur Rahman},
url = {https://www.mdpi.com/books/reprint/7570-artificial-intelligence-ai-and-machine-learning-ml-in-medical-imaging-informatics-towards},
doi = {https://doi.org/10.3390/books978-3-0365-8129-3},
isbn = {978-3-0365-8128-6},
year = {2023},
date = {2023-07-22},
urldate = {2023-07-22},
issue = {Special},
abstract = {In recent years, AI/ML tools have become more prevalent in the fields of medical imaging and imaging informatics, where systems are already outperforming physicians in a range of domains, such as in the classification of retinal fundus images in ophthalmology, chest X-rays in radiology, and skin cancer detection in dermatology, among many others. It has recently emerged as one of the fastest growing research areas given the evolution of techniques in radiology, molecular imaging, anatomical imaging, and functional imaging for detection, segmentation, diagnosis, annotation, summarization, and prediction. The ongoing innovations in this exciting and promising field play a powerful role in influencing the lives of millions through health, safety, education, and other opportunities intended to be shared across all segments of society. To achieve further progress, this Special Issue (SI) invited both research and review-type manuscripts to showcase ongoing research progress and development based on applications of AI/ML (especially DL techniques) in medical imaging to influence human health and healthcare systems in the diagnostic decision-making process. The SI published fourteen articles after a rigorous peer-review process across the spectrum of medical imaging modalities and the diversity of specialties depending on imaging techniques from radiology, dermatology, pathology, colonoscopy, endoscopy, etc.},
keywords = {},
pubstate = {published},
tppubtype = {book}
}

Akinniyi, Oluwatunmise; Rahman, Md Mahmudur; Sandhu, Harpal Singh; El-Baz, Ayman; Khalifa, Fahmi
Multi-Stage Classification of Retinal OCT Using Multi-Scale Ensemble Deep Architecture Journal Article
In: Bioengineering, vol. 10, iss. 7, 2023.
@article{nokey,
title = {Multi-Stage Classification of Retinal OCT Using Multi-Scale Ensemble Deep Architecture},
author = {Oluwatunmise Akinniyi and Md Mahmudur Rahman and Harpal Singh Sandhu and Ayman El-Baz and Fahmi Khalifa},
url = {https://www.mdpi.com/2306-5354/10/7/823},
doi = {https://doi.org/10.3390/bioengineering10070823},
year = {2023},
date = {2023-07-10},
urldate = {2023-07-10},
journal = {Bioengineering},
volume = {10},
issue = {7},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Abiodun, P. O.; Owolabi, O. A.; Efe, S.; Shokouhian, M.; Aladeokin, O.; Olude, A. I.; Bazyar Shourabi, N.; Ikiriko, S.; Bista, K.; Gaulee, U.; Efe, F.; Rahman, M. M.; Abedoh, H.; Ladeji-Osias, J. K.
Evaluating the Impact of Experiment-Centric Pedagogy on Civil Engineering Undergraduates’ Motivation Conference
2023 ASEE Annual Conference & Exposition, Baltimore, Maryland, 2023.
Abstract | Links | BibTeX | Tags: Diversity
@conference{nokey,
title = {Evaluating the Impact of Experiment-Centric Pedagogy on Civil Engineering Undergraduates’ Motivation},
author = {Abiodun, P. O. and Owolabi, O. A. and Efe, S. and Shokouhian, M. and Aladeokin, O. and Olude, A. I. and Bazyar Shourabi, N. and Ikiriko, S. and Bista, K. and Gaulee, U. and Efe, F. and Rahman, M. M. and Abedoh, H. and Ladeji-Osias, J. K.},
publisher = {2023 ASEE Annual Conference & Exposition, Committee on Effective Teaching Presents: Evaluation, Assessment, & Performance},
url = {https://peer.asee.org/43440},
doi = {10.18260/1-2--43440},
year = {2023},
date = {2023-06-25},
pages = {19},
abstract = {Motivation is a strong factor in effective learning, and it has an impact on learning outcomes. Students' motivation can make or break their ability to grasp abstract courses which predominate courses taught in Civil Engineering. Students that are more motivated to study, stick with it longer, and put in more effort to perform better in class, hands-on experiments, and standardised tests. This study is designed to answer the following questions: (i) Is there a significant difference between the motivation of Civil Engineering undergraduates pre and post implementation of experiment-centric pedagogy? (ii) Is there a significant difference between Civil Engineering undergraduates’ motivation pre and post implementation of experiment-centric pedagogy based on gender? and (iii) Is there any significant association between socio-demographic characteristics of Civil Engineering undergraduates and their motivation? Motivation constructs considered in the present study include intrinsic goal orientation, task value, expectancy component, test anxiety, critical thinking, and metacognition. Undergraduates’ responses shall be collected using 7-point Likert-scales, and statistical analyses done using Statistical Package for Social Scientists (SPSS 25.0) at a statistical significance set at 0.05.},
keywords = {Diversity},
pubstate = {published},
tppubtype = {conference}
}

Owolabi, O. A.; Abiodun, P. O.; Asahiah, A. O.; Abedoh, H.; Shokouhian, M.; Bazyar Shourabi, N.; Bista, K.; Gaulee, U.; Rahman, M. M.; Ladeji-Osias, J. K.; Emiola-Owolabi, O. V.
Utilization of Social Management Theoretical Framework and Program Management Tool to Successfully Manage Large Multi-Department STEM Projects Conference
2023 ASEE Annual Conference & Exposition, Baltimore, Maryland, 2023.
Abstract | Links | BibTeX | Tags: Diversity and NSF Grantees Poster Session
@conference{nokey,
title = {Utilization of Social Management Theoretical Framework and Program Management Tool to Successfully Manage Large Multi-Department STEM Projects},
author = {Owolabi, O. A. and Abiodun, P. O. and Asahiah, A. O. and Abedoh, H. and Shokouhian, M. and Bazyar Shourabi, N. and Bista, K. and Gaulee, U. and Rahman, M. M. and Ladeji-Osias, J. K. and Emiola-Owolabi, O. V.},
url = {https://peer.asee.org/42777},
year = {2023},
date = {2023-06-25},
urldate = {2023-06-25},
pages = {24},
publisher = {2023 ASEE Annual Conference & Exposition},
address = {Baltimore, Maryland},
abstract = {The Adapting an Experiment-centric Teaching Approach to Increase Student Achievement in Multiple STEM Disciplines (ETA-STEM) NSF program is an experiment-focused, hands-on teaching pedagogy developed to promote motivation and academic achievement across seven STEM disciplines. The program is a large educational program with multi-department STEM projects comprising about 200 tasks and 40 personnel. To facilitate the successful implementation of the ETA-STEM program, an efficient project management tool called Smartsheet was adopted to manage all the tasks to be carried out and the activities involved. The Smartsheet software has helped to efficiently facilitate project coordination, scheduling deliverables, communicating with and assigning tasks to project team members, monitoring task performance, and evaluation. Smartsheet is a project management tool developed for coordinating and monitoring project activities, promoting productive guidance, efficient communication, appropriate supervision of the project team, optimization of the allocated necessary inputs, and their application to meeting the program’s objectives. The paper describes the functions and the operations of this tool utilized in managing the ETA-STEM project for the past three years. Additionally, the paper elaborates on the social management theoretical framework on which the project management principles are hinged. The impactful outcomes of the ETA-STEM program in increasing academic performance as well as improving key constructs associated with student success such as motivation, epistemic and perceptual curiosity, engineering identity, and self-efficacy presented in the paper reveal an efficient management strategy anchored on the social management theoretical framework and facilitated by the project management tool.},
keywords = {Diversity and NSF Grantees Poster Session},
pubstate = {published},
tppubtype = {conference}
}

John-Otumu, A. M.; Rahman, M. M.; Nwokonkwo, O. C.; Onuoha, M. C.
AI-Based Techniques for Online Social Media Network Sentiment Analysis: A Methodical Review Journal Article
In: International Journal of Computer Science Engineering and Information Technology, vol. 16, no. 12, 2023.
Abstract | Links | BibTeX | Tags:
@article{nokey,
title = {AI-Based Techniques for Online Social Media Network Sentiment Analysis: A Methodical Review},
author = {A. M. John-Otumu and M. M. Rahman and O. C. Nwokonkwo and M. C. Onuoha},
url = {https://publications.waset.org/10012817/ai-based-techniques-for-online-social-media-network-sentiment-analysis-a-methodical-review},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {International Journal of Computer Science Engineering and Information Technology},
volume = {16},
number = {12},
abstract = {Online social media networks have long served as a primary arena for group conversations, gossip, text-based information sharing and distribution. The use of natural language processing techniques for text classification and unbiased decision making has not been far-fetched. Proper classification of these textual information in a given context has also been very difficult. As a result, a systematic review was conducted from previous literature on sentiment classification and AI-based techniques. The study was done in order to gain a better understanding of the process of designing and developing a robust and more accurate sentiment classifier that could correctly classify social media textual information of a given context between hate speech and inverted compliments with a high level of accuracy using the knowledge gain from the evaluation of different artificial intelligence techniques reviewed. The study evaluated over 250 articles from digital sources like ACM digital library, Google Scholar, and IEEE Xplore; and whittled down the number of research to 52 articles. Findings revealed that deep learning approaches such as Convolutional Neural Network (CNN), Recurrent Neural Network (RNN), Bidirectional Encoder Representations from Transformer (BERT), and Long Short-Term Memory (LSTM) outperformed various machine learning techniques in terms of performance accuracy. A large dataset is also required to develop a robust sentiment classifier. Results also revealed that data can be obtained from places like Twitter, movie reviews, Kaggle, Stanford Sentiment Treebank (SST), and SemEval Task4 based on the required domain. The hybrid deep learning techniques like CNN+LSTM, CNN+ Gated Recurrent Unit (GRU), CNN+BERT outperformed single deep learning techniques and machine learning techniques. Python programming language outperformed Java programming language in terms of development simplicity and AI-based library functionalities. Finally, the study recommended the findings obtained for building robust sentiment classifier in the future.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
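
A hedged sketch of the CNN+LSTM hybrid highlighted in the review above as a strong performer for sentiment classification; the vocabulary size, sequence length, and layer sizes are illustrative and not taken from any reviewed paper.

import tensorflow as tf

vocab_size = 20000                                       # assumed vocabulary size
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, 128),
    tf.keras.layers.Conv1D(64, 5, activation="relu"),    # local n-gram features
    tf.keras.layers.MaxPooling1D(4),
    tf.keras.layers.LSTM(64),                            # longer-range dependencies
    tf.keras.layers.Dense(1, activation="sigmoid"),      # binary sentiment score
])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.summary()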

Rahman, Md Mahmudur; Layode, Oyebisi Francis; Alam, Tasmeer
SYSTEM AND METHOD FOR AUTOMATED DIAGNOSIS OF SKIN CANCER TYPES FROM DERMOSCOPIC IMAGES Patent
2023.
Abstract | Links | BibTeX | Tags:
@patent{nokey,
title = {SYSTEM AND METHOD FOR AUTOMATED DIAGNOSIS OF SKIN CANCER TYPES FROM DERMOSCOPIC IMAGES},
author = {Md Mahmudur Rahman and Oyebisi Francis Layode and Tasmeer Alam},
url = {https://www.researchgate.net/publication/371247001_System_and_method_for_automated_diagnosis_of_skin_cancer_types_from_dermoscopic_images_Patent_No_11538577},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
abstract = {Disclosed is a content-based image retrieval (CBIR) system and related methods that serve as a diagnostic aid for diagnosing whether a dermoscopic image correlates to a skin cancer type. Systems and methods according to aspects of the invention use as a reference a set of images of pathologically confirmed benign or malignant past cases from a collection of different classes that are of high similarity to the unknown new case in question, along with their diagnostic profiles. Systems and methods according to aspects of the invention predict what class of skin cancer is associated with a particular patient skin lesion, and may be employed as a diagnostic aid for general practitioners and dermatologists.},
keywords = {},
pubstate = {published},
tppubtype = {patent}
}
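
A simplified sketch of the retrieval idea in the patent abstract above: find the most similar pathologically confirmed past cases for a query image and use their labels as a diagnostic cue. The feature vectors are assumed to be pre-computed, and the data here is random placeholder content.

import numpy as np
from sklearn.neighbors import NearestNeighbors

rng = np.random.default_rng(1)
reference_feats = rng.random((500, 256))                 # features of confirmed past cases
reference_labels = rng.choice(["benign", "malignant"], 500)

index = NearestNeighbors(n_neighbors=5, metric="cosine").fit(reference_feats)
query = rng.random((1, 256))                             # features of the unknown new case
dists, idx = index.kneighbors(query)

similar_cases = reference_labels[idx[0]]                 # diagnostic profiles shown to the user
labels, counts = np.unique(similar_cases, return_counts=True)
suggested_class = labels[counts.argmax()]                # simple majority over retrieved cases
print(similar_cases, suggested_class)
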
2021

Mehravaran, Shiva; Dehzangi, Iman; Rahman, Md Mahmudur
Interocular Symmetry Analysis of Corneal Elevation Using the Fellow Eye as the Reference Surface and Machine Learning Journal Article
In: Healthcare, vol. 9, no. 12, 2021.
Abstract | Links | BibTeX | Tags:
@article{nokey,
title = {Interocular Symmetry Analysis of Corneal Elevation Using the Fellow Eye as the Reference Surface and Machine Learning},
author = {Shiva Mehravaran and Iman Dehzangi and Md Mahmudur Rahman},
doi = {10.3390/healthcare9121738},
year = {2021},
date = {2021-12-16},
urldate = {2021-12-16},
journal = {Healthcare},
volume = {9},
number = {12},
abstract = {Unilateral corneal indices and topography maps are routinely used in practice, however, although there is consensus that fellow-eye asymmetry can be clinically significant, symmetry studies are limited to local curvature and single-point thickness or elevation measures. To improve our current practices, there is a need to devise algorithms for generating symmetry colormaps, study and categorize their patterns, and develop reference ranges for new global discriminative indices for identifying abnormal corneas. In this work, we test the feasibility of using the fellow eye as the reference surface for studying elevation symmetry throughout the entire corneal surface using 9230 raw Pentacam files from a population-based cohort of 4613 middle-aged adults. The 140 × 140 matrix of anterior elevation data in these files were handled with Python to subtract matrices, create color-coded maps, and engineer features for machine learning. The most common pattern was a monochrome circle (“flat”) denoting excellent mirror symmetry. Other discernible patterns were named “tilt”, “cone”, and “four-leaf”. Clustering was done with different combinations of features and various algorithms using Waikato Environment for Knowledge Analysis (WEKA). Our proposed approach can identify cases that may appear normal in each eye individually but need further testing. This work will be enhanced by including data of posterior elevation, thickness, and common diagnostic indices.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
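
A toy sketch of the symmetry analysis described above: subtract one eye's 140 x 140 anterior-elevation matrix from the mirrored fellow eye's, then derive simple summary features for clustering. The random matrices, the mirroring step, and the two example features are placeholders (the study engineered its own features and clustered with WEKA; scikit-learn's KMeans stands in here).

import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(2)
right_eyes = rng.normal(size=(100, 140, 140))         # elevation maps, one per subject
left_eyes = rng.normal(size=(100, 140, 140))

diff_maps = right_eyes - np.flip(left_eyes, axis=2)   # mirror the fellow eye, then subtract
features = np.column_stack([
    np.abs(diff_maps).mean(axis=(1, 2)),              # mean absolute asymmetry
    diff_maps.std(axis=(1, 2)),                       # spread of the difference map
])
clusters = KMeans(n_clusters=4, n_init=10, random_state=0).fit_predict(features)
print(np.bincount(clusters))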

John-Otumu, Adetokunbo MacGregor; Rahman, Md Mahmudur; Oko, Christiana Ugochinyere
An Efficient Phishing Website Detection Plugin Service for Existing Web Browsers Using Random Forest Classifier Journal Article
In: American Journal of Artificial Intelligence, vol. 5, no. 2, pp. 66-75, 2021.
Abstract | Links | BibTeX | Tags:
@article{nokey,
title = {An Efficient Phishing Website Detection Plugin Service for Existing Web Browsers Using Random Forest Classifier},
author = {Adetokunbo MacGregor John-Otumu and Md Mahmudur Rahman and Christiana Ugochinyere Oko},
doi = {10.11648/j.ajai.20210502.13},
year = {2021},
date = {2021-11-05},
urldate = {2021-11-05},
journal = {American Journal of Artificial Intelligence},
volume = {5},
number = {2},
pages = {66-75},
abstract = {An efficient phishing website detection plugin service was developed using machine learning technique based on the prevalent phishing threat while using existing web browsers in critical online transactions. The study gathered useful information from 27 published articles and dataset consisting of 11,000 data points with 30 features downloaded from phishtank. A unique architectural framework for detecting phishing websites was designed using random forest machine learning classifier based the aim and objectives of the study. The model was trained with 90% (9,900) of the dataset and tested with 10% (1,100) using Python programming language for better efficiency. Microsoft Visual Studio Code, Jupiter Notebook, Anaconda Integrated Development Environment, HTML/CSS and JavaScript was used in developing the frontend of the model for easy integration into existing web browsers. The proposed model was also modeled using use-case and sequence diagrams to test its internal functionalities. The result revealed that the proposed model had an accuracy of 0.96, error rate of 0.04, precision of 0.97, recall value of 0.99 and f1-score of 0.98 which far outperform other models developed based on literatures. Future recommendations should focus on improved security features, more phishing adaptive learning properties, and so on, so that it can be reasonably applied to other web browsers in accurately detecting real-world phishing situations using advanced algorithms such as hybridized machine learning and deep learning techniques.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
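
A minimal sketch of the pipeline described above: a random forest trained on a 30-feature phishing dataset with a 90/10 split, evaluated with the reported metrics. Synthetic data stands in for the phishtank-derived dataset, and the forest size is an assumption.

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

rng = np.random.default_rng(3)
X = rng.random((11000, 30))                         # 11,000 samples x 30 URL/page features
y = rng.integers(0, 2, 11000)                       # 1 = phishing, 0 = legitimate

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.10, random_state=0)
clf = RandomForestClassifier(n_estimators=200, random_state=0).fit(X_tr, y_tr)
pred = clf.predict(X_te)
print(accuracy_score(y_te, pred), precision_score(y_te, pred),
      recall_score(y_te, pred), f1_score(y_te, pred))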

Ahangari, Samira; Jeihani, Mansoureh; Ardeshiri, Anam; Rahman, Md Mahmudur; Dehzangi, Abdollah
Enhancing the Performance of a Model to Predict Driving Distraction with the Random Forest Classifier Journal Article
In: Journal of the Transportation Research Board, vol. 2675, no. 11, pp. 612-622, 2021.
Abstract | Links | BibTeX | Tags:
@article{nokey,
title = {Enhancing the Performance of a Model to Predict Driving Distraction with the Random Forest Classifier},
author = {Samira Ahangari and Mansoureh Jeihani and Anam Ardeshiri and Md Mahmudur Rahman and Abdollah Dehzangi},
doi = {10.1177/03611981211018695},
year = {2021},
date = {2021-08-24},
urldate = {2021-08-24},
journal = {Journal of the Transportation Research Board},
volume = {2675},
number = {11},
pages = {612-622},
abstract = {Distracted driving is known to be one of the main causes of crashes in the United States, accounting for about 40% of all crashes. Drivers’ situational awareness, decision-making, and driving performance are impaired as a result of temporarily diverting their attention from the primary task of driving to other unrelated tasks. Detecting driver distraction would help in adapting the most effective countermeasures. To tackle this problem, we employed a random forest (RF) classifier, one of the best classifiers that has attained promising results for a wide range of problems. Here, we trained RF using the data collected from a driving simulator, in which 92 participants drove under six different distraction scenarios of handheld calling, hands-free calling, texting, voice command, clothing, and eating/drinking on four different road classes (rural collector, freeway, urban arterial, and local road in a school zone). Various driving performance measures such as speed, acceleration, throttle, lane changing, brake, collision, and offset from the lane center were investigated. Using the RF method, we achieved 76.5% prediction accuracy on the independent test set, which is over 8.2% better than results reported in previous studies. We also obtained a 76.6% true positive rate, which is 14% better than those reported in previous studies. Such results demonstrate the preference of RF over other machine learning methods to identify driving distractions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Layode, Oyebisi; Rahman, Md. Mahmudur
A Chest X-ray Image Retrieval System for COVID-19 Detection using Deep Transfer Learning and Denoising Auto Encoder Conference
2020 International Conference on Computational Science and Computational Intelligence (CSCI), IEEE, 2021.
Abstract | Links | BibTeX | Tags:
@conference{nokey,
title = {A Chest X-ray Image Retrieval System for COVID-19 Detection using Deep Transfer Learning and Denoising Auto Encoder},
author = {Oyebisi Layode and Md. Mahmudur Rahman},
doi = {10.1109/CSCI51800.2020.00301},
year = {2021},
date = {2021-06-23},
urldate = {2021-06-23},
booktitle = {2020 International Conference on Computational Science and Computational Intelligence (CSCI)},
publisher = {IEEE},
abstract = {The COVID-19 pandemic is the defining global health crisis of our time which is currently challenging families, communities, health care systems, and government all over the world. It is critical to detect and isolate the positive cases as early as possible for timely treatment to prevent the further spread of the virus. It was found in few early studies that patients present abnormalities in chest radiography images that are characteristic of those infected with COVID-19. In the current context, a rapid, accessible and automated screening tool based on image processing of chest X-rays (CXRs) would be much needed as a quick alternative to PCR testing, especially with commonly available X-ray machines and without the dedicated test kits in labs and hospitals. Several classifications based approaches have been proposed recently with encouraging results to detect pneumonia based on CXRs using supervised deep transfer learning techniques based on Convolutional Neural Networks (CNNs). These black box approaches are mainly non-interactive in nature and their prediction represents just a cue to the radiologist. This work focuses on issues related to the development of such an automated system for CXRs by performing discriminative feature learning using deep neural networks with a purely data driven approach and retrieving images based on an unknown query image and performing retrieval evaluation on currently available benchmark datasets towards the goal of realistic comparison and real clinical integration. The system is trained and tested on an image collection of 1700 CXRs obtained from two different resources with encouraging results based on precision and recall measures in individual deep feature spaces. It is hoped that the proposed system as diagnostic aid would reduce the visual observation error of human operators and enhance sensitivity in testing for Covid-19 detection.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
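
A condensed sketch of the retrieval side of the system described above: deep features are extracted from chest X-rays with a pre-trained CNN and ranked by cosine similarity against a query image. The paper also learns features with a denoising autoencoder, which is omitted here for brevity; the DenseNet121 backbone and image size are assumptions.

import numpy as np
import tensorflow as tf

backbone = tf.keras.applications.DenseNet121(include_top=False, pooling="avg",
                                             input_shape=(224, 224, 3))

def deep_features(images):
    """images: float array (n, 224, 224, 3) with pixel values in [0, 255]."""
    return backbone.predict(tf.keras.applications.densenet.preprocess_input(images))

def retrieve(query_feat, gallery_feats, top_k=5):
    q = query_feat / np.linalg.norm(query_feat)
    g = gallery_feats / np.linalg.norm(gallery_feats, axis=1, keepdims=True)
    sims = g @ q                                    # cosine similarity to every CXR
    return np.argsort(-sims)[:top_k]                # indices of the most similar images

gallery = deep_features(np.random.rand(20, 224, 224, 3).astype("float32") * 255)
query = deep_features(np.random.rand(1, 224, 224, 3).astype("float32") * 255)[0]
print(retrieve(query, gallery))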

Ahangari, Samira; Jeihani, Mansoureh; Rahman, Md Mahmudur; Dehzangi, Abdollah
Predicting Driving Distraction Patterns in Different Road Classes Using A Support Vector Machine Journal Article
In: International Journal for Traffic and Transport Engineering (IJTTE), vol. 11, 2021.
Abstract | Links | BibTeX | Tags:
@article{nokey,
title = {Predicting Driving Distraction Patterns in Different Road Classes Using A Support Vector Machine},
author = {Samira Ahangari and Mansoureh Jeihani and Md Mahmudur Rahman and Abdollah Dehzangi},
doi = {10.7708/ijtte.2021.11(1).06},
year = {2021},
date = {2021-01-01},
urldate = {2021-01-01},
journal = {International Journal for Traffic and Transport Engineering (IJTTE)},
volume = {11},
abstract = {This study investigates driving behavior under distraction on four different road classes – freeway, urban arterial, rural, and local road in a school zone – using a high-fidelity driving simulator. Some 92 younger participants from a reasonably diverse sociodemographic background drove a realistic midsize network in the Baltimore metropolitan area and were exposed to different distractions. A total of 1,952 simulation runs were conducted. An ANOVA and Tukey Post Hoc analysis showed that distracted driving behavior demonstrates different patterns on various roads. This research developed a support vector machine model that achieved distraction prediction ability among different routes with an accuracy of 94.24%, which to the best of our knowledge, is the best for such a task. The results indicate that driver distraction prediction models probably would be more accurate if developed separately for each road class.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2020

Rahman, Md Mahmudur; Sharker, Monir H.; Paudel, Roshan
An Effective Approach to Teach an Introductory Computer Science Course with Computational Thinking and Flow-Chart Based Visual Programming Conference
2020 IEEE Frontiers in Education Conference (FIE), IEEE, 2020.
@conference{nokey,
title = {An Effective Approach to Teach an Introductory Computer Science Course with Computational Thinking and Flow-Chart Based Visual Programming},
author = {Md Mahmudur Rahman and Monir H. Sharker and Roshan Paudel},
year = {2020},
date = {2020-12-04},
urldate = {2020-12-04},
booktitle = {2020 IEEE Frontiers in Education Conference (FIE)},
publisher = {IEEE},
abstract = {This Research to Practice Full Paper presents our experience of positive outcomes with increased motivation and retention in teaching an introductory Computer Science course with Python programming. Without reinventing the wheel, we infused few well established pedagogies by integrating and evaluating Computational Thinking (CT) skills in a meaningful way. We integrated CT with existing curriculum alongside programming and teaching general problem-solving techniques with a flowchart-based programming environment and without using specific programming concepts or languages at the beginning. Our aim here is not only to teach a programming language per se, but also to teach, at the beginning, the different ways of problem solving, logical reasoning, algorithm design, and programming constructs with minimal or no emphasis on syntax. A positive learning experience is successfully developed for our students by using appropriate pedagogies and strategies. To evaluate the impact of this infusion, a pre- and post-survey as well as a pre- and post-CT test were conducted on student cohort in different sections. The statistical analysis of the survey and test results show evidence of improvement in student's problem solving and coding skills as well as increase in motivation towards programming.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}

Rahman, Md Mahmudur; Sharker, Monir H; Paudel, Roshan
Poster: Impact of Infusing Interactive and Collaborative Learning in Teaching Introductory Programming in a Dynamic Class Proceedings Article
In: SIGCSE 2020: Special Interest Group Computer Science Education, 2020.
BibTeX | Tags:
@inproceedings{impact_2020,
title = {Poster: Impact of Infusing Interactive and Collaborative Learning in Teaching Introductory Programming in a Dynamic Class},
author = {Md Mahmudur Rahman and Monir H Sharker and Roshan Paudel},
year = {2020},
date = {2020-03-01},
booktitle = {SIGCSE 2020: Special Interest Group Computer Science Education},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Rahman, Md Mahmudur; Sharker, Monir H; Paudel, Roshan
An Effective Approach to Teach an Introductory Computer Science Course with Computational Thinking and Flow-Chart Based Visual Programming Proceedings Article
In: 2020 IEEE Frontiers in Education Conference (FIE), pp. 1–7, IEEE 2020.
BibTeX | Tags:
@inproceedings{rahman2020effective,
title = {An Effective Approach to Teach an Introductory Computer Science Course with Computational Thinking and Flow-Chart Based Visual Programming},
author = {Md Mahmudur Rahman and Monir H Sharker and Roshan Paudel},
year = {2020},
date = {2020-01-01},
booktitle = {2020 IEEE Frontiers in Education Conference (FIE)},
pages = {1--7},
organization = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Rahman, Md Mahmudur; Sharker, Monir H; Paudel, Roshan
Active and Collaborative Learning Based Dynamic Instructional Approach in Teaching Introductory Computer Science Course with Python Programming Proceedings Article
In: 2020 IEEE Integrated STEM Education Conference (ISEC), pp. 1–7, IEEE 2020.
BibTeX | Tags:
@inproceedings{rahman2020active,
title = {Active and Collaborative Learning Based Dynamic Instructional Approach in Teaching Introductory Computer Science Course with Python Programming},
author = {Md Mahmudur Rahman and Monir H Sharker and Roshan Paudel},
year = {2020},
date = {2020-01-01},
booktitle = {2020 IEEE Integrated STEM Education Conference (ISEC)},
pages = {1--7},
organization = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2019

Rahman, Md Mahmudur
Poster: Teaching Programming with Interactive & Collaborative Learning Proceedings Article
In: AAC&U’s Transforming STEM Higher Education conference, 2019.
BibTeX | Tags:
@inproceedings{teaching_programming_2019,
title = {Poster: Teaching Programming with Interactive & Collaborative Learning},
author = {Md Mahmudur Rahman},
year = {2019},
date = {2019-11-01},
booktitle = {AAC&U’s Transforming STEM Higher Education conference},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2018

Rahman, Md Mahmudur
Poster: Interactive Teaching of an Introductory CS Course with Increasing Student Motivation and Retention Proceedings Article
In: AAC&U’s Transforming STEM Higher Education conference, 2018.
BibTeX | Tags:
@inproceedings{interactive_2018,
title = {Poster: Interactive Teaching of an Introductory CS Course with Increasing Student Motivation and Retention},
author = {Md Mahmudur Rahman},
year = {2018},
date = {2018-11-01},
booktitle = {AAC&U’s Transforming STEM Higher Education conference},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Rahman, Md Mahmudur
Poster: Infusing Visual Programming and Interactive Learning to Teach an Introductory Programming Course with Positive Learning Outcomes Proceedings Article
In: SIGCSE '18: Proceedings of the 49th ACM Technical Symposium on Computer Science Education, 2018.
BibTeX | Tags:
@inproceedings{infusing_2018,
title = {Poster: Infusing Visual Programming and Interactive Learning to Teach an Introductory Programming Course with Positive Learning Outcomes},
author = {Md Mahmudur Rahman},
year = {2018},
date = {2018-02-01},
booktitle = {SIGCSE '18: Proceedings of the 49th ACM Technical Symposium on Computer Science Education},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Uddin, Md. Raihan; Sharma, Alok; Farid, Dewan Md; Rahman, Md. Mahmudur; Dehzangi, Abdollah; Shatabda, Swakkhar
EvoStruct-Sub: An accurate Gram-positive protein subcellular localization predictor using evolutionary and structural features Journal Article
In: Journal of Theoretical Biology, vol. 443, pp. 138-146, 2018, ISSN: 0022-5193.
Abstract | Links | BibTeX | Tags: Classification, Evolutionary-based features, Feature selection, Proteins subcellular localization, Structural-based features, Support vector machine
@article{UDDIN2018138,
title = {EvoStruct-Sub: An accurate Gram-positive protein subcellular localization predictor using evolutionary and structural features},
author = {Md. Raihan Uddin and Alok Sharma and Dewan Md Farid and Md. Mahmudur Rahman and Abdollah Dehzangi and Swakkhar Shatabda},
url = {https://www.sciencedirect.com/science/article/pii/S0022519318300559},
doi = {https://doi.org/10.1016/j.jtbi.2018.02.002},
issn = {0022-5193},
year = {2018},
date = {2018-01-01},
journal = {Journal of Theoretical Biology},
volume = {443},
pages = {138-146},
abstract = {Determining subcellular localization of proteins is considered as an important step towards understanding their functions. Previous studies have mainly focused solely on Gene Ontology (GO) as the main feature to tackle this problem. However, it was shown that features extracted based on GO is hard to be used for new proteins with unknown GO. At the same time, evolutionary information extracted from Position Specific Scoring Matrix (PSSM) have been shown as another effective features to tackle this problem. Despite tremendous advancement using these sources for feature extraction, this problem still remains unsolved. In this study we propose EvoStruct-Sub which employs predicted structural information in conjunction with evolutionary information extracted directly from the protein sequence to tackle this problem. To do this we use several different feature extraction method that have been shown promising in subcellular localization as well as similar studies to extract effective local and global discriminatory information. We then use Support Vector Machine (SVM) as our classification technique to build EvoStruct-Sub. As a result, we are able to enhance Gram-positive subcellular localization prediction accuracies by up to 5.6% better than previous studies including the studies that used GO for feature extraction.},
keywords = {Classification, Evolutionary-based features, Feature selection, Proteins subcellular localization, Structural-based features, Support vector machine},
pubstate = {published},
tppubtype = {article}
}
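
A simplified sketch of the idea above: derive fixed-length features from a protein's PSSM (evolutionary information) and classify subcellular location with an SVM. Real PSSMs come from PSI-BLAST; a random 20-column matrix stands in here, and the single feature shown (column-wise average, i.e. a PSSM composition profile) is only one of the descriptors such methods use, not EvoStruct-Sub's exact feature set.

import numpy as np
from sklearn.svm import SVC

def pssm_composition(pssm):
    """pssm: (sequence_length, 20) score matrix -> 20-d average profile."""
    return pssm.mean(axis=0)

rng = np.random.default_rng(4)
pssms = [rng.normal(size=(rng.integers(50, 300), 20)) for _ in range(120)]
X = np.vstack([pssm_composition(p) for p in pssms])
y = rng.integers(0, 4, 120)          # placeholder Gram-positive localization labels

clf = SVC(kernel="rbf", C=1.0).fit(X, y)
print(clf.predict(X[:5]))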

Rahman, Md Mahmudur; Paudel, Roshan
Preliminary experience and learning outcomes by infusing interactive and active learning to teach an introductory programming course in Python Proceedings Article
In: Proceedings of the International Conference on Frontiers in Education: Computer Science and Computer Engineering (FECS), pp. 51–57, The Steering Committee of The World Congress in Computer Science, Computer~… 2018.
BibTeX | Tags:
@inproceedings{rahman2018preliminary,
title = {Preliminary experience and learning outcomes by infusing interactive and active learning to teach an introductory programming course in Python},
author = {Md Mahmudur Rahman and Roshan Paudel},
year = {2018},
date = {2018-01-01},
booktitle = {Proceedings of the International Conference on Frontiers in Education: Computer Science and Computer Engineering (FECS)},
pages = {51--57},
organization = {The Steering Committee of The World Congress in Computer Science, Computer~…},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Rahman, Md Mahmudur; Paudel, Roshan
Visual programming and interactive learning based dynamic instructional approaches to teach an introductory programming course Proceedings Article
In: 2018 IEEE Frontiers in Education Conference (FIE), pp. 1–6, IEEE 2018.
BibTeX | Tags:
@inproceedings{rahman2018visual,
title = {Visual programming and interactive learning based dynamic instructional approaches to teach an introductory programming course},
author = {Md Mahmudur Rahman and Roshan Paudel},
year = {2018},
date = {2018-01-01},
booktitle = {2018 IEEE Frontiers in Education Conference (FIE)},
pages = {1--6},
organization = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2017

Rahman, Md Mahmudur
Poster: Incorporating interactive visual learning with computational thinking (CT) concepts to teach an introductory programming course Proceedings Article
In: International Computing Education Research conference (ICER 2017), 2017.
BibTeX | Tags:
@inproceedings{incorporating_2017,
title = {Poster: Incorporating interactive visual learning with computational thinking (CT) concepts to teach an introductory programming course},
author = {Md Mahmudur Rahman},
year = {2017},
date = {2017-08-01},
booktitle = {International Computing Education Research conference (ICER 2017)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2016

Rahman, Mahmudur; Alpaslan, Nuh; Bhattacharya, Prabir
Developing a retrieval based diagnostic aid for automated melanoma recognition of dermoscopic images Proceedings Article
In: 2016 IEEE Applied Imagery Pattern Recognition Workshop (AIPR), pp. 1-7, 2016.
@inproceedings{8010594,
title = {Developing a retrieval based diagnostic aid for automated melanoma recognition of dermoscopic images},
author = {Mahmudur Rahman and Nuh Alpaslan and Prabir Bhattacharya},
doi = {10.1109/AIPR.2016.8010594},
year = {2016},
date = {2016-01-01},
booktitle = {2016 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)},
pages = {1-7},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2015

Simpson, Matthew S; You, Daekeun; Rahman, Md Mahmudur; Xue, Zhiyun; Demner-Fushman, Dina; Antani, Sameer; Thoma, George
Literature-based biomedical image classification and retrieval Journal Article
In: Computerized Medical Imaging and Graphics, vol. 39, pp. 3-13, 2015, ISSN: 0895-6111, (Medical visual information analysis and retrieval).
Abstract | Links | BibTeX | Tags: Case-based retrieval, Compound figure separation, Image-based retrieval, Modality classification
@article{SIMPSON20153,
title = {Literature-based biomedical image classification and retrieval},
author = {Matthew S Simpson and Daekeun You and Md Mahmudur Rahman and Zhiyun Xue and Dina Demner-Fushman and Sameer Antani and George Thoma},
url = {https://www.sciencedirect.com/science/article/pii/S0895611114000998},
doi = {https://doi.org/10.1016/j.compmedimag.2014.06.006},
issn = {0895-6111},
year = {2015},
date = {2015-01-01},
journal = {Computerized Medical Imaging and Graphics},
volume = {39},
pages = {3-13},
abstract = {Literature-based image informatics techniques are essential for managing the rapidly increasing volume of information in the biomedical domain. Compound figure separation, modality classification, and image retrieval are three related tasks useful for enabling efficient access to the most relevant images contained in the literature. In this article, we describe approaches to these tasks and the evaluation of our methods as part of the 2013 medical track of ImageCLEF. In performing each of these tasks, the textual and visual features used to represent images are an important consideration often left unaddressed. Therefore, we also describe a gradient-based optimization strategy for determining meaningful combinations of features and apply the method to the image retrieval task. An evaluation of our optimization strategy indicates the method is capable of producing statistically significant improvements in retrieval performance. Furthermore, the results of the 2013 ImageCLEF evaluation demonstrate the effectiveness of our techniques. In particular, our text-based and mixed image retrieval methods ranked first among all the participating groups.},
note = {Medical visual information analysis and retrieval},
keywords = {Case-based retrieval, Compound figure separation, Image-based retrieval, Modality classification},
pubstate = {published},
tppubtype = {article}
}

Paul, Arnob; Khan, Tanvir Zaman; Podder, Prajoy; Ahmed, Rafi; Rahman, Muktadir M; Khan, Mamdudul Haque
Iris image compression using wavelets transform coding Proceedings Article
In: 2015 2nd International Conference on Signal Processing and Integrated Networks (SPIN), pp. 544-548, 2015.
@inproceedings{7095407,
title = {Iris image compression using wavelets transform coding},
author = {Arnob Paul and Tanvir Zaman Khan and Prajoy Podder and Rafi Ahmed and Muktadir M Rahman and Mamdudul Haque Khan},
doi = {10.1109/SPIN.2015.7095407},
year = {2015},
date = {2015-01-01},
booktitle = {2015 2nd International Conference on Signal Processing and Integrated Networks (SPIN)},
pages = {544-548},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2013

Rahman, Md Mahmudur; You, Daekeun; Simpson, Matthew S; Antani, Sameer K; Demner-Fushman, Dina; Thoma, George R
Multimodal biomedical image retrieval using hierarchical classification and modality fusion Journal Article
In: International Journal of Multimedia Information Retrieval, vol. 2, no. 3, pp. 159–173, 2013, ISSN: 2192-662X.
Abstract | Links | BibTeX | Tags:
@article{cite-key,
title = {Multimodal biomedical image retrieval using hierarchical classification and modality fusion},
author = {Md Mahmudur Rahman and Daekeun You and Matthew S Simpson and Sameer K Antani and Dina Demner-Fushman and George R Thoma},
url = {https://doi.org/10.1007/s13735-013-0038-4},
doi = {10.1007/s13735-013-0038-4},
issn = {2192-662X},
year = {2013},
date = {2013-01-01},
journal = {International Journal of Multimedia Information Retrieval},
volume = {2},
number = {3},
pages = {159--173},
abstract = {Images are frequently used in articles to convey essential information in context with correlated text. However, searching images in a task-specific way poses significant challenges. To minimize limitations of low-level feature representations in content-based image retrieval (CBIR), and to complement text-based search, we propose a multi-modal image search approach that exploits hierarchical organization of modalities and employs both intra and inter-modality fusion techniques. For the CBIR search, several visual features were extracted to represent the images. Modality-specific information was used for similarity fusion and selection of a relevant image subset. Intra-modality fusion of retrieval results was performed by searching images for specific informational elements. Our methods use text extracted from relevant components in a document to create structured representations as ``enriched citations''for the text-based search approach. Finally, the multi-modal search consists of a weighted linear combination of similarity scores of independent output results from textual and visual search approaches (inter modality). Search results were evaluated using a standard ImageCLEFmed 2012 evaluation dataset of 300,000 images with associated annotations. We achieved a mean average precision (MAP) score of 0.2533, which is statistically significant, and better in performance (7 % improvement) over comparable results in ImageCLEFmed 2012.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
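
A small sketch of the inter-modality fusion step described in the abstract above: the final score for each image is a weighted linear combination of its text-based and visual (CBIR) similarity scores. The weight and the example scores are placeholders, not values from the paper.

import numpy as np

text_scores = np.array([0.82, 0.10, 0.55, 0.40])     # similarity from the text-based search
visual_scores = np.array([0.30, 0.75, 0.60, 0.20])   # similarity from the visual (CBIR) search
w = 0.7                                              # assumed weight favouring the text modality

fused = w * text_scores + (1.0 - w) * visual_scores
ranking = np.argsort(-fused)                         # best-matching images first
print(fused, ranking)
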
2011

Rahman, Md Mahmudur; Antani, Sameer K; Thoma, George R
A Learning-Based Similarity Fusion and Filtering Approach for Biomedical Image Retrieval Using SVM Classification and Relevance Feedback Journal Article
In: IEEE Transactions on Information Technology in Biomedicine, vol. 15, no. 4, pp. 640-646, 2011.
@article{5892891,
title = {A Learning-Based Similarity Fusion and Filtering Approach for Biomedical Image Retrieval Using SVM Classification and Relevance Feedback},
author = {Md Mahmudur Rahman and Sameer K Antani and George R Thoma},
doi = {10.1109/TITB.2011.2151258},
year = {2011},
date = {2011-01-01},
journal = {IEEE Transactions on Information Technology in Biomedicine},
volume = {15},
number = {4},
pages = {640-646},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Rahman, M M; Antani, S K; Thoma, G R
A query expansion framework in image retrieval domain based on local and global analysis Journal Article
In: Information Processing & Management, vol. 47, no. 5, pp. 676-691, 2011, ISSN: 0306-4573, (Managing and Mining Multilingual Documents).
Abstract | Links | BibTeX | Tags: Image retrieval, Query expansion, Relevance feedback, Support vector machine, Vector space model
@article{RAHMAN2011676,
title = {A query expansion framework in image retrieval domain based on local and global analysis},
author = {M M Rahman and S K Antani and G R Thoma},
url = {https://www.sciencedirect.com/science/article/pii/S0306457310001020},
doi = {https://doi.org/10.1016/j.ipm.2010.12.001},
issn = {0306-4573},
year = {2011},
date = {2011-01-01},
journal = {Information Processing & Management},
volume = {47},
number = {5},
pages = {676-691},
abstract = {We present an image retrieval framework based on automatic query expansion in a concept feature space by generalizing the vector space model of information retrieval. In this framework, images are represented by vectors of weighted concepts similar to the keyword-based representation used in text retrieval. To generate the concept vocabularies, a statistical model is built by utilizing Support Vector Machine (SVM)-based classification techniques. The images are represented as “bag of concepts” that comprise perceptually and/or semantically distinguishable color and texture patches from local image regions in a multi-dimensional feature space. To explore the correlation between the concepts and overcome the assumption of feature independence in this model, we propose query expansion techniques in the image domain from a new perspective based on both local and global analysis. For the local analysis, the correlations between the concepts based on the co-occurrence pattern, and the metrical constraints based on the neighborhood proximity between the concepts in encoded images, are analyzed by considering local feedback information. We also analyze the concept similarities in the collection as a whole in the form of a similarity thesaurus and propose an efficient query expansion based on the global analysis. The experimental results on a photographic collection of natural scenes and a biomedical database of different imaging modalities demonstrate the effectiveness of the proposed framework in terms of precision and recall.},
note = {Managing and Mining Multilingual Documents},
keywords = {Image retrieval, Query expansion, Relevance feedback, Support vector machine, Vector space model},
pubstate = {published},
tppubtype = {article}
}
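The global-analysis step above expands a query represented as a "bag of concepts" using a collection-wide similarity thesaurus. The sketch below illustrates one way such an expansion could look; the data structures, neighborhood size, and mixing factor are assumptions for illustration, not the paper's exact weighting.

```python
import numpy as np

def expand_query(query_vec, thesaurus, k=3, alpha=0.5):
    """Expand a concept-weight query vector using a global similarity
    thesaurus (a concept-by-concept similarity matrix built offline).
    Each concept present in the query passes a fraction (alpha) of its
    weight to its k most similar concepts. The data structures, k, and
    alpha are illustrative assumptions, not the paper's exact scheme."""
    expanded = query_vec.astype(float)
    for c in np.nonzero(query_vec)[0]:
        # Most similar concepts to c, excluding c itself.
        neighbours = [n for n in np.argsort(thesaurus[c])[::-1] if n != c][:k]
        for n in neighbours:
            expanded[n] += alpha * query_vec[c] * thesaurus[c, n]
    return expanded / np.linalg.norm(expanded)
```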
2010

Rahman, M M; Bhattacharya, P
An integrated and interactive decision support system for automated melanoma recognition of dermoscopic images Journal Article
In: Computerized Medical Imaging and Graphics, vol. 34, no. 6, pp. 479-486, 2010, ISSN: 0895-6111, (Biomedical Image Technologies and Methods - BIBE 2008).
Abstract | Links | BibTeX | Tags: Classification, Content-based image retrieval, Decision support system, Dermoscopy, Fusion, Melanoma, Skin cancer
@article{RAHMAN2010479,
title = {An integrated and interactive decision support system for automated melanoma recognition of dermoscopic images},
author = {M M Rahman and P Bhattacharya},
url = {https://www.sciencedirect.com/science/article/pii/S0895611109001311},
doi = {10.1016/j.compmedimag.2009.10.003},
issn = {0895-6111},
year = {2010},
date = {2010-01-01},
journal = {Computerized Medical Imaging and Graphics},
volume = {34},
number = {6},
pages = {479-486},
abstract = {This paper presents an integrated and interactive decision support system for the automated melanoma recognition of the dermoscopic images based on image retrieval by content and multiple expert fusion. In this context, the ultimate aim is to support the decision making by retrieving and displaying the relevant past cases as well as predicting the image categories (e.g., melanoma, benign and dysplastic nevi) by combining outputs from different classifiers. However, the most challenging aspect in this domain is to detect the lesion from the healthy background skin and extract the lesion-specific local image features. A thresholding-based segmentation method is applied on the intensity images generated from two different schemes to detect the lesion. For the fusion-based image retrieval and classification, the lesion-specific local color and texture features are extracted and represented in the form of the mean and variance–covariance of color channels and in a combined feature space. The performance is evaluated by using both the precision-recall and classification accuracies. Experimental results on a dermoscopic image collection demonstrate the effectiveness of the proposed system and show the viability of a real-time clinical application.},
note = {Biomedical Image Technologies and Methods - BIBE 2008},
keywords = {Classification, Content-based image retrieval, Decision support system, Dermoscopy, Fusion, Melanoma, Skin cancer},
pubstate = {published},
tppubtype = {article}
}
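The lesion-specific color representation mentioned above, the mean and variance-covariance of the color channels inside the segmented lesion, can be sketched as follows; the array shapes and function name are assumptions, and the texture part of the feature space is omitted.

```python
import numpy as np

def lesion_color_features(image, lesion_mask):
    """Mean and variance-covariance of the color channels restricted to
    the segmented lesion region.

    image:       H x W x 3 array of color values
    lesion_mask: H x W boolean array, True inside the lesion
    Returns a flat vector of 3 channel means plus the 9 entries of the
    3 x 3 covariance matrix (texture features are not shown here)."""
    pixels = image[lesion_mask].astype(float)   # N x 3 lesion pixels
    mean = pixels.mean(axis=0)                  # per-channel mean
    cov = np.cov(pixels, rowvar=False)          # 3 x 3 covariance matrix
    return np.concatenate([mean, cov.ravel()])
```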

You, Daekeun; Antani, Sameer; Demner-Fushman, Dina; Rahman, Md Mahmudur; Govindaraju, Venu; Thoma, George R
Biomedical article retrieval using multimodal features and image annotations in region-based CBIR Proceedings Article
In: Likforman-Sulem, Laurence; Agam, Gady (Ed.): Document Recognition and Retrieval XVII, pp. 282–293, International Society for Optics and Photonics, SPIE, 2010.
Links | BibTeX | Tags: biomedical article retrieval, biomedical image analysis, Content-based image retrieval, figure caption text analysis, image overlay extraction, pointer symbol extraction
@inproceedings{nokey,
title = {Biomedical article retrieval using multimodal features and image annotations in region-based CBIR},
author = {Daekeun You and Sameer Antani and Dina Demner-Fushman and Md Mahmudur Rahman and Venu Govindaraju and George R Thoma},
editor = {Laurence Likforman-Sulem and Gady Agam},
url = {https://doi.org/10.1117/12.838973},
year = {2010},
date = {2010-01-01},
booktitle = {Document Recognition and Retrieval XVII},
volume = {7534},
pages = {282 -- 293},
publisher = {SPIE},
organization = {International Society for Optics and Photonics},
keywords = {biomedical article retrieval, biomedical image analysis, Content-based image retrieval, figure caption text analysis, image overlay extraction, pointer symbol extraction},
pubstate = {published},
tppubtype = {inproceedings}
}

Sohail, Abu Sayeed Md.; Rahman, Md. Mahmudur; Bhattacharya, Prabir; Krishnamurthy, Srinivasan; Mudur, Sudhir P
Retrieval and classification of ultrasound images of ovarian cysts combining texture features and histogram moments Proceedings Article
In: 2010 IEEE International Symposium on Biomedical Imaging: From Nano to Macro, pp. 288-291, 2010.
@inproceedings{5490352,
title = {Retrieval and classification of ultrasound images of ovarian cysts combining texture features and histogram moments},
author = {Abu Sayeed Md. Sohail and Md. Mahmudur Rahman and Prabir Bhattacharya and Srinivasan Krishnamurthy and Sudhir P Mudur},
doi = {10.1109/ISBI.2010.5490352},
year = {2010},
date = {2010-01-01},
booktitle = {2010 IEEE International Symposium on Biomedical Imaging: From Nano to Macro},
pages = {288-291},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Rahman, Md Mahmudur; Antani, Sameer K; Thoma, George R
A Classification-Driven Similarity Matching Framework for Retrieval of Biomedical Images Proceedings Article
In: Proceedings of the International Conference on Multimedia Information Retrieval, pp. 147–154, Association for Computing Machinery, Philadelphia, Pennsylvania, USA, 2010, ISBN: 9781605588155.
Abstract | Links | BibTeX | Tags: Classification, Classifier combination, Content-based image retrieval, Medical imaging, similarity matching, Support vector machine
@inproceedings{10.1145/1743384.1743413,
title = {A Classification-Driven Similarity Matching Framework for Retrieval of Biomedical Images},
author = {Md Mahmudur Rahman and Sameer K Antani and George R Thoma},
url = {https://doi.org/10.1145/1743384.1743413},
doi = {10.1145/1743384.1743413},
isbn = {9781605588155},
year = {2010},
date = {2010-01-01},
booktitle = {Proceedings of the International Conference on Multimedia Information Retrieval},
pages = {147–154},
publisher = {Association for Computing Machinery},
address = {Philadelphia, Pennsylvania, USA},
series = {MIR '10},
abstract = {This paper presents a classification-driven biomedical image retrieval system to bridge the semantic gap by transforming image features to their global categories at different granularity, such as image modality, body part, and orientation. To generate the feature vectors at different levels of abstraction, both the visual concept feature based on the "bag of concepts" model that comprises local color and texture patches and various low-level global color, edge, and texture-related features are extracted. Since it is difficult to find a unique feature to compare images effectively for all types of queries, we utilize a similarity fusion approach based on the linear combination of individual features. However, instead of using the commonly used fixed or hard weighting approach, we rely on the image classification to determine the importance of a feature in real time. For this, a supervised multi-class classifier based on the support vector machine (SVM) is trained on a set of sample images, and classifier combination techniques based on the rules derived from Bayes' theorem are explored. After the combined prediction of the classifiers for a query image category, the individual pre-computed weights of different features are adjusted in the similarity matching function for effective query-specific retrieval. Experiments are performed on a diverse medical image collection of 67,000 images of different modalities. They demonstrate the effectiveness of the category-specific similarity fusion approach with a mean average precision (MAP) score of 0.0265 when compared to using only a single feature or equal weighting of each feature in similarity matching.},
keywords = {Classification, Classifier combination, Content-based image retrieval, Medical imaging, similarity matching, Support vector machine},
pubstate = {published},
tppubtype = {inproceedings}
}
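The classification-driven fusion above first predicts the query's global category and then applies category-specific weights in the similarity matching function. The sketch below shows the general shape of such a scheme; the data layouts, the choice of cosine similarity, and all names are illustrative assumptions rather than the paper's exact formulation.

```python
import numpy as np

def query_specific_retrieval(query_feats, db_feats, category_weights, clf):
    """Classification-driven similarity fusion: a trained multi-class
    classifier predicts the query category, and the pre-computed
    per-category feature weights are then used in a linear combination
    of per-feature similarities. Data layouts, the cosine similarity,
    and all names are illustrative assumptions.

    query_feats:      {feature_name: 1-D query vector}
    db_feats:         {feature_name: N x d matrix of database vectors}
    category_weights: {category: {feature_name: weight}}
    clf:              classifier exposing predict() on a 2-D array"""
    concat = np.concatenate([query_feats[f] for f in sorted(query_feats)])
    category = clf.predict(concat.reshape(1, -1))[0]
    weights = category_weights[category]

    n_images = next(iter(db_feats.values())).shape[0]
    scores = np.zeros(n_images)
    for name, q in query_feats.items():
        db = db_feats[name]
        # Cosine similarity between the query and every database image.
        sims = db @ q / (np.linalg.norm(db, axis=1) * np.linalg.norm(q) + 1e-12)
        scores += weights[name] * sims
    return np.argsort(scores)[::-1]   # ranked image indices, best first
```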
2009

Rahman, Md Mahmudur; Antani, Sameer K; Thoma, George R
A medical image retrieval framework in correlation enhanced visual concept feature space Proceedings Article
In: 2009 22nd IEEE International Symposium on Computer-Based Medical Systems, pp. 1-4, 2009.
@inproceedings{5255392,
title = {A medical image retrieval framework in correlation enhanced visual concept feature space},
author = {Md Mahmudur Rahman and Sameer K Antani and George R Thoma},
doi = {10.1109/CBMS.2009.5255392},
year = {2009},
date = {2009-01-01},
booktitle = {2009 22nd IEEE International Symposium on Computer-Based Medical Systems},
pages = {1-4},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Rahman, Md. Mahmudur; Bhattacharya, Prabir; Desai, Bipin C
A unified image retrieval framework on local visual and semantic concept-based feature spaces Journal Article
In: Journal of Visual Communication and Image Representation, vol. 20, no. 7, pp. 450-462, 2009, ISSN: 1047-3203.
Abstract | Links | BibTeX | Tags: Classification, Content-based image retrieval, Learning methods, Relevance feedback, Self-organizing map, Similarity fusion, Support vector machine
@article{RAHMAN2009450,
title = {A unified image retrieval framework on local visual and semantic concept-based feature spaces},
author = {Md. Mahmudur Rahman and Prabir Bhattacharya and Bipin C Desai},
url = {https://www.sciencedirect.com/science/article/pii/S1047320309000686},
doi = {10.1016/j.jvcir.2009.06.001},
issn = {1047-3203},
year = {2009},
date = {2009-01-01},
journal = {Journal of Visual Communication and Image Representation},
volume = {20},
number = {7},
pages = {450-462},
abstract = {This paper presents a learning-based unified image retrieval framework to represent images in local visual and semantic concept-based feature spaces. In this framework, a visual concept vocabulary (codebook) is automatically constructed by utilizing self-organizing map (SOM) and statistical models are built for local semantic concepts using probabilistic multi-class support vector machine (SVM). Based on these constructions, the images are represented in correlation and spatial relationship-enhanced concept feature spaces by exploiting the topology preserving local neighborhood structure of the codebook, local concept correlation statistics, and spatial relationships in individual encoded images. Finally, the features are unified by a dynamically weighted linear combination of similarity matching scheme based on the relevance feedback information. The feature weights are calculated by considering both the precision and the rank order information of the top retrieved relevant images of each representation, which adapts itself to individual searches to produce effective results. The experimental results on a photographic database of natural scenes and a bio-medical database of different imaging modalities and body parts demonstrate the effectiveness of the proposed framework.},
keywords = {Classification, Content-based image retrieval, Learning methods, Relevance feedback, Self-organizing map, Similarity fusion, Support vector machine},
pubstate = {published},
tppubtype = {article}
}
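The dynamically weighted combination above derives each feature's weight from the precision and rank order of the top retrieved relevant images under relevance feedback. A rough sketch of one such weighting follows; the exact scoring functions are assumptions, not the paper's formulas.

```python
def feedback_weights(rankings, relevant, k=20):
    """Derive per-feature weights from relevance feedback by combining
    the precision of each feature's top-k results with the rank
    positions of the relevant images it retrieves, then normalizing the
    weights to sum to one. The scoring shown is an assumption; the
    paper's exact formulas may differ.

    rankings: {feature_name: list of image IDs ordered best first}
    relevant: set of image IDs judged relevant by the user"""
    raw = {}
    for name, ranked in rankings.items():
        top_k = ranked[:k]
        precision = sum(1 for img in top_k if img in relevant) / k
        # Reward relevant images that appear near the top of the ranking.
        rank_score = sum(1.0 / (pos + 1)
                         for pos, img in enumerate(top_k) if img in relevant)
        raw[name] = precision * rank_score
    total = sum(raw.values()) or 1.0          # avoid division by zero
    return {name: score / total for name, score in raw.items()}
```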
2008

Rahman, Md. Mahmudur; Desai, Bipin C; Bhattacharya, Prabir
Medical image retrieval with probabilistic multi-class support vector machine classifiers and adaptive similarity fusion Journal Article
In: Computerized Medical Imaging and Graphics, vol. 32, no. 2, pp. 95-108, 2008, ISSN: 0895-6111.
Abstract | Links | BibTeX | Tags: Classification, Classifier combination, Content-based image retrieval, Inverted file, Medical imaging, Similarity fusion, Support vector machine
@article{RAHMAN200895,
title = {Medical image retrieval with probabilistic multi-class support vector machine classifiers and adaptive similarity fusion},
author = {Md. Mahmudur Rahman and Bipin C Desai and Prabir Bhattacharya},
url = {https://www.sciencedirect.com/science/article/pii/S0895611107001383},
doi = {10.1016/j.compmedimag.2007.10.001},
issn = {0895-6111},
year = {2008},
date = {2008-01-01},
journal = {Computerized Medical Imaging and Graphics},
volume = {32},
number = {2},
pages = {95-108},
abstract = {We present a content-based image retrieval framework for diverse collections of medical images of different modalities, anatomical regions, acquisition views, and biological systems. For the image representation, the probabilistic output from multi-class support vector machines (SVMs) with low-level features as inputs are represented as a vector of confidence or membership scores of pre-defined image categories. The outputs are combined for feature-level fusion and retrieval based on the combination rules that are derived by following Bayes’ theorem. We also propose an adaptive similarity fusion approach based on a linear combination of individual feature level similarities. The feature weights are calculated by considering both the precision and the rank order information of top retrieved relevant images as predicted by SVMs. The weights are dynamically updated by the system for each individual search to produce effective results. The experiments and analysis of the results are based on a diverse medical image collection of 11,000 images of 116 categories. The performances of the classification and retrieval algorithms are evaluated both in terms of error rate and precision–recall. Our results demonstrate the effectiveness of the proposed framework as compared to the commonly used approaches based on low-level feature descriptors.},
keywords = {Classification, Classifier combination, Content-based image retrieval, Inverted file, Medical imaging, Similarity fusion, Support vector machine},
pubstate = {published},
tppubtype = {article}
}
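The feature-level fusion above combines the probabilistic outputs of the per-feature SVM classifiers using combination rules derived from Bayes' theorem. The sketch below shows the familiar product and sum rules as an illustration; it does not claim to reproduce the paper's derivation.

```python
import numpy as np

def combine_probabilistic_outputs(prob_outputs, rule="product"):
    """Combine per-feature probabilistic classifier outputs with the
    classic product and sum rules, which follow from Bayes' theorem
    under different independence assumptions. prob_outputs is a list of
    1-D arrays, each a distribution over the same image categories;
    only these two rules are shown, as an illustration."""
    probs = np.vstack(prob_outputs)       # n_classifiers x n_categories
    if rule == "product":
        combined = probs.prod(axis=0)
    elif rule == "sum":
        combined = probs.mean(axis=0)
    else:
        raise ValueError(f"unknown rule: {rule}")
    return combined / combined.sum()      # renormalized category scores
```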

Rahman, Md. Mahmudur; Bhattacharya, Prabir; Desai, Bipin C
A multiple expert-based melanoma recognition system for dermoscopic images of pigmented skin lesions Proceedings Article
In: 2008 8th IEEE International Conference on BioInformatics and BioEngineering, pp. 1-6, 2008.
@inproceedings{4696799,
title = {A multiple expert-based melanoma recognition system for dermoscopic images of pigmented skin lesions},
author = {Md. Mahmudur Rahman and Prabir Bhattacharya and Bipin C Desai},
doi = {10.1109/BIBE.2008.4696799},
year = {2008},
date = {2008-01-01},
booktitle = {2008 8th IEEE International Conference on BioInformatics and BioEngineering},
pages = {1-6},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2007

Rahman, Md. Mahmudur; Bhattacharya, Prabir; Desai, Bipin C
A Framework for Medical Image Retrieval Using Machine Learning and Statistical Similarity Matching Techniques With Relevance Feedback Journal Article
In: IEEE Transactions on Information Technology in Biomedicine, vol. 11, no. 1, pp. 58-69, 2007.
@article{4049802,
title = {A Framework for Medical Image Retrieval Using Machine Learning and Statistical Similarity Matching Techniques With Relevance Feedback},
author = {Md. Mahmudur Rahman and Prabir Bhattacharya and Bipin C Desai},
doi = {10.1109/TITB.2006.884364},
year = {2007},
date = {2007-01-01},
journal = {IEEE Transactions on Information Technology in Biomedicine},
volume = {11},
number = {1},
pages = {58-69},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2006

Rahman, Md. M.; Desai, B C; Bhattacharya, P
Image Retrieval-Based Decision Support System for Dermatoscopic Images Proceedings Article
In: 19th IEEE Symposium on Computer-Based Medical Systems (CBMS'06), pp. 285-290, 2006.
@inproceedings{1647583,
title = {Image Retrieval-Based Decision Support System for Dermatoscopic Images},
author = {Md. M. Rahman and B C Desai and P Bhattacharya},
doi = {10.1109/CBMS.2006.98},
year = {2006},
date = {2006-01-01},
booktitle = {19th IEEE Symposium on Computer-Based Medical Systems (CBMS'06)},
pages = {285-290},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2004

Rahman, M; Wang, Tongyuan; Desai, B C
Medical image retrieval and registration: towards computer assisted diagnostic approach Proceedings Article
In: 2004 IDEAS Workshop on Medical Information Systems: The Digital Hospital (IDEAS-DH'04), pp. 78-89, 2004.
@inproceedings{1410527,
title = {Medical image retrieval and registration: towards computer assisted diagnostic approach},
author = {M Rahman and Tongyuan Wang and B C Desai},
doi = {10.1109/IDEADH.2004.17},
year = {2004},
date = {2004-01-01},
booktitle = {2004 IDEAS Workshop on Medical Information Systems: The Digital Hospital (IDEAS-DH'04)},
pages = {78-89},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}