2018
Uddin, Md. Raihan; Sharma, Alok; Farid, Dewan Md; Rahman, Md. Mahmudur; Dehzangi, Abdollah; Shatabda, Swakkhar
EvoStruct-Sub: An accurate Gram-positive protein subcellular localization predictor using evolutionary and structural features Journal Article
In: Journal of Theoretical Biology, vol. 443, pp. 138-146, 2018, ISSN: 0022-5193.
Abstract | Links | BibTeX | Tags: Classification, Evolutionary-based features, Feature selection, Proteins subcellular localization, Structural-based features, Support vector machine
@article{UDDIN2018138,
title = {EvoStruct-Sub: An accurate Gram-positive protein subcellular localization predictor using evolutionary and structural features},
author = {Md. Raihan Uddin and Alok Sharma and Dewan Md Farid and Md. Mahmudur Rahman and Abdollah Dehzangi and Swakkhar Shatabda},
url = {https://www.sciencedirect.com/science/article/pii/S0022519318300559},
doi = {10.1016/j.jtbi.2018.02.002},
issn = {0022-5193},
year = {2018},
date = {2018-01-01},
journal = {Journal of Theoretical Biology},
volume = {443},
pages = {138-146},
abstract = {Determining the subcellular localization of proteins is considered an important step towards understanding their functions. Previous studies have focused mainly on Gene Ontology (GO) as the main feature to tackle this problem. However, it was shown that features extracted from GO are hard to use for new proteins with unknown GO annotations. At the same time, evolutionary information extracted from the Position Specific Scoring Matrix (PSSM) has been shown to provide another set of effective features for this problem. Despite tremendous advancement using these sources for feature extraction, this problem still remains unsolved. In this study we propose EvoStruct-Sub, which employs predicted structural information in conjunction with evolutionary information extracted directly from the protein sequence to tackle this problem. To do this we use several feature extraction methods that have been shown to be promising in subcellular localization and similar studies to extract effective local and global discriminatory information. We then use a Support Vector Machine (SVM) as our classification technique to build EvoStruct-Sub. As a result, we are able to improve Gram-positive subcellular localization prediction accuracy by up to 5.6% over previous studies, including those that used GO for feature extraction.},
keywords = {Classification, Evolutionary-based features, Feature selection, Proteins subcellular localization, Structural-based features, Support vector machine},
pubstate = {published},
tppubtype = {article}
}
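A minimal Python sketch of the kind of pipeline the abstract above describes: a global composition feature is derived from a protein's PSSM and fed to an SVM classifier. The sigmoid-averaged descriptor, the toy data, and all parameters are illustrative assumptions, not the paper's actual feature extraction methods or settings.

# Hypothetical sketch: PSSM-composition features fed to an SVM, in the spirit
# of evolutionary-feature predictors such as EvoStruct-Sub (the paper's exact
# feature set and parameters are not reproduced here).
import numpy as np
from sklearn.svm import SVC

def pssm_composition(pssm):
    """Collapse an L x 20 PSSM into a 20-d feature vector.

    Each column of substitution scores is squashed with a sigmoid and
    averaged over the sequence length, a common global PSSM descriptor.
    """
    squashed = 1.0 / (1.0 + np.exp(-pssm))   # map scores to (0, 1)
    return squashed.mean(axis=0)             # average over residues

# Toy data: random stand-in "PSSMs" for a handful of proteins and their labels
rng = np.random.default_rng(0)
pssms = [rng.normal(size=(rng.integers(50, 200), 20)) for _ in range(40)]
labels = rng.integers(0, 4, size=40)          # e.g. 4 Gram-positive locations

X = np.vstack([pssm_composition(p) for p in pssms])
clf = SVC(kernel="rbf").fit(X, labels)
print(clf.predict(X[:5]))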
2010
Rahman, M M; Bhattacharya, P
An integrated and interactive decision support system for automated melanoma recognition of dermoscopic images Journal Article
In: Computerized Medical Imaging and Graphics, vol. 34, no. 6, pp. 479-486, 2010, ISSN: 0895-6111, (Biomedical Image Technologies and Methods - BIBE 2008).
Abstract | Links | BibTeX | Tags: Classification, Content-based image retrieval, Decision support system, Dermoscopy, Fusion, Melanoma, Skin cancer
@article{RAHMAN2010479,
title = {An integrated and interactive decision support system for automated melanoma recognition of dermoscopic images},
author = {M M Rahman and P Bhattacharya},
url = {https://www.sciencedirect.com/science/article/pii/S0895611109001311},
doi = {10.1016/j.compmedimag.2009.10.003},
issn = {0895-6111},
year = {2010},
date = {2010-01-01},
journal = {Computerized Medical Imaging and Graphics},
volume = {34},
number = {6},
pages = {479-486},
abstract = {This paper presents an integrated and interactive decision support system for automated melanoma recognition of dermoscopic images based on content-based image retrieval and multiple expert fusion. In this context, the ultimate aim is to support decision making by retrieving and displaying relevant past cases as well as predicting image categories (e.g., melanoma, benign and dysplastic nevi) by combining outputs from different classifiers. However, the most challenging aspect in this domain is to separate the lesion from the healthy background skin and to extract lesion-specific local image features. A thresholding-based segmentation method is applied to the intensity images generated from two different schemes to detect the lesion. For the fusion-based image retrieval and classification, the lesion-specific local color and texture features are extracted and represented in the form of the mean and variance–covariance of color channels and in a combined feature space. The performance is evaluated using both precision–recall and classification accuracies. Experimental results on a dermoscopic image collection demonstrate the effectiveness of the proposed system and show the viability of a real-time clinical application.},
note = {Biomedical Image Technologies and Methods - BIBE 2008},
keywords = {Classification, Content-based image retrieval, Decision support system, Dermoscopy, Fusion, Melanoma, Skin cancer},
pubstate = {published},
tppubtype = {article}
}
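As a rough illustration of the lesion analysis step described in the abstract above, the sketch below thresholds an intensity image to isolate the lesion and summarises the lesion pixels by the mean and variance–covariance of the colour channels. The Otsu threshold, the assumption that the lesion is darker than the surrounding skin, and the random stand-in image are all assumptions for illustration only.

# Hedged sketch of the lesion feature idea: threshold an intensity image to
# separate lesion from background skin, then summarise the lesion pixels by
# the mean and covariance of the colour channels.
import numpy as np

def otsu_threshold(gray):
    """Return the Otsu threshold of a greyscale image with values in [0, 1]."""
    hist, edges = np.histogram(gray, bins=256, range=(0.0, 1.0))
    centers = (edges[:-1] + edges[1:]) / 2
    total = hist.sum()
    best_t, best_var = 0.0, -1.0
    for i in range(1, 256):
        w0, w1 = hist[:i].sum(), hist[i:].sum()
        if w0 == 0 or w1 == 0:
            continue
        m0 = (hist[:i] * centers[:i]).sum() / w0
        m1 = (hist[i:] * centers[i:]).sum() / w1
        between = w0 * w1 * (m0 - m1) ** 2 / total ** 2
        if between > best_var:
            best_var, best_t = between, centers[i]
    return best_t

def lesion_color_features(rgb):
    """Mean and variance-covariance of colour channels inside the lesion mask."""
    gray = rgb.mean(axis=2)
    mask = gray < otsu_threshold(gray)        # lesion assumed darker than skin
    pixels = rgb[mask]                        # (n_lesion_pixels, 3)
    return pixels.mean(axis=0), np.cov(pixels, rowvar=False)

image = np.random.rand(64, 64, 3)             # stand-in for a dermoscopic image
mean_vec, cov_mat = lesion_color_features(image)
print(mean_vec.shape, cov_mat.shape)           # (3,) (3, 3)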
Rahman, Md Mahmudur; Antani, Sameer K; Thoma, George R
A Classification-Driven Similarity Matching Framework for Retrieval of Biomedical Images Proceedings Article
In: Proceedings of the International Conference on Multimedia Information Retrieval, pp. 147–154, Association for Computing Machinery, Philadelphia, Pennsylvania, USA, 2010, ISBN: 9781605588155.
Abstract | Links | BibTeX | Tags: Classification, Classifier combination, Content-based image retrieval, Medical imaging, similarity matching, Support vector machine
@inproceedings{10.1145/1743384.1743413,
title = {A Classification-Driven Similarity Matching Framework for Retrieval of Biomedical Images},
author = {Md Mahmudur Rahman and Sameer K Antani and George R Thoma},
url = {https://doi.org/10.1145/1743384.1743413},
doi = {10.1145/1743384.1743413},
isbn = {9781605588155},
year = {2010},
date = {2010-01-01},
booktitle = {Proceedings of the International Conference on Multimedia Information Retrieval},
pages = {147–154},
publisher = {Association for Computing Machinery},
address = {Philadelphia, Pennsylvania, USA},
series = {MIR '10},
abstract = {This paper presents a classification-driven biomedical image retrieval system to bridge the semantic gap by transforming image features to their global categories at different granularity, such as image modality, body part, and orientation. To generate the feature vectors at different levels of abstraction, both a visual concept feature based on the "bag of concepts" model that comprises local color and texture patches and various low-level global color, edge, and texture-related features are extracted. Since it is difficult to find a unique feature to compare images effectively for all types of queries, we utilize a similarity fusion approach based on the linear combination of individual features. However, instead of using the commonly used fixed or hard weighting approach, we rely on image classification to determine the importance of a feature in real time. For this, a supervised multi-class classifier based on the support vector machine (SVM) is trained on a set of sample images, and classifier combination techniques based on rules derived from Bayes' theorem are explored. After the combined prediction of the classifiers for a query image category, the individual pre-computed weights of different features are adjusted in the similarity matching function for effective query-specific retrieval. Experiments are performed on a diverse medical image collection of 67,000 images of different modalities. The results demonstrate the effectiveness of the category-specific similarity fusion approach with a mean average precision (MAP) score of 0.0265 when compared to using only a single feature or equal weighting of each feature in similarity matching.},
keywords = {Classification, Classifier combination, Content-based image retrieval, Medical imaging, similarity matching, Support vector machine},
pubstate = {published},
tppubtype = {inproceedings}
}
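The query-adaptive fusion idea in the abstract above can be sketched in a few lines: classifier posteriors are combined (here with a product rule, one of the rules derivable from Bayes' theorem), the predicted category selects a set of pre-computed feature weights, and those weights drive the linear similarity combination. The category names, weight values, and two-feature setup are hypothetical, not the paper's configuration.

# Minimal sketch of classification-driven, query-specific similarity fusion.
import numpy as np

# Pre-computed weights per predicted category for two hypothetical features
CATEGORY_WEIGHTS = {
    "xray_chest": {"color": 0.2, "texture": 0.8},
    "microscopy": {"color": 0.7, "texture": 0.3},
}

def combine_classifiers(prob_lists):
    """Product-rule combination of per-classifier posterior vectors."""
    combined = np.prod(np.vstack(prob_lists), axis=0)
    return combined / combined.sum()

def fused_similarity(query_sims, category):
    """Weighted linear combination of per-feature similarity scores."""
    w = CATEGORY_WEIGHTS[category]
    return sum(w[f] * s for f, s in query_sims.items())

categories = ["xray_chest", "microscopy"]
posteriors = [np.array([0.6, 0.4]), np.array([0.7, 0.3])]   # two classifiers
predicted = categories[int(np.argmax(combine_classifiers(posteriors)))]

# Per-feature similarities between the query and one database image
print(fused_similarity({"color": 0.42, "texture": 0.91}, predicted))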
2009
Rahman, Md. Mahmudur; Bhattacharya, Prabir; Desai, Bipin C
A unified image retrieval framework on local visual and semantic concept-based feature spaces Journal Article
In: Journal of Visual Communication and Image Representation, vol. 20, no. 7, pp. 450-462, 2009, ISSN: 1047-3203.
Abstract | Links | BibTeX | Tags: Classification, Content-based image retrieval, Learning methods, Relevance feedback, Self-organizing map, Similarity fusion, Support vector machine
@article{RAHMAN2009450,
title = {A unified image retrieval framework on local visual and semantic concept-based feature spaces},
author = {Md. Mahmudur Rahman and Prabir Bhattacharya and Bipin C Desai},
url = {https://www.sciencedirect.com/science/article/pii/S1047320309000686},
doi = {10.1016/j.jvcir.2009.06.001},
issn = {1047-3203},
year = {2009},
date = {2009-01-01},
journal = {Journal of Visual Communication and Image Representation},
volume = {20},
number = {7},
pages = {450-462},
abstract = {This paper presents a learning-based unified image retrieval framework to represent images in local visual and semantic concept-based feature spaces. In this framework, a visual concept vocabulary (codebook) is automatically constructed by utilizing a self-organizing map (SOM), and statistical models are built for local semantic concepts using a probabilistic multi-class support vector machine (SVM). Based on these constructions, the images are represented in correlation and spatial relationship-enhanced concept feature spaces by exploiting the topology-preserving local neighborhood structure of the codebook, local concept correlation statistics, and spatial relationships in individual encoded images. Finally, the features are unified through a dynamically weighted linear combination-based similarity matching scheme driven by relevance feedback information. The feature weights are calculated by considering both the precision and the rank order information of the top retrieved relevant images of each representation, which adapts itself to individual searches to produce effective results. The experimental results on a photographic database of natural scenes and a bio-medical database of different imaging modalities and body parts demonstrate the effectiveness of the proposed framework.},
keywords = {Classification, Content-based image retrieval, Learning methods, Relevance feedback, Self-organizing map, Similarity fusion, Support vector machine},
pubstate = {published},
tppubtype = {article}
}
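A small sketch of the "bag of concepts" encoding used in this line of work: local patch descriptors are assigned to their nearest entry in a visual-concept codebook, and the image is represented by the normalised histogram of concept counts. In the paper the codebook is learned with a self-organizing map; here it is simply a given array, and all sizes are illustrative assumptions.

# Hedged illustration of encoding an image against a visual-concept codebook.
import numpy as np

def bag_of_concepts(patch_features, codebook):
    """patch_features: (n_patches, d); codebook: (n_concepts, d)."""
    # squared Euclidean distance from every patch to every codebook concept
    d2 = ((patch_features[:, None, :] - codebook[None, :, :]) ** 2).sum(axis=2)
    nearest = d2.argmin(axis=1)                       # concept index per patch
    hist = np.bincount(nearest, minlength=len(codebook)).astype(float)
    return hist / hist.sum()                          # normalised concept histogram

rng = np.random.default_rng(1)
codebook = rng.normal(size=(32, 48))                  # 32 concepts, 48-d descriptors
patches = rng.normal(size=(200, 48))                  # local patches from one image
print(bag_of_concepts(patches, codebook).shape)       # (32,)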
2008
Rahman, Md. Mahmudur; Desai, Bipin C; Bhattacharya, Prabir
Medical image retrieval with probabilistic multi-class support vector machine classifiers and adaptive similarity fusion Journal Article
In: Computerized Medical Imaging and Graphics, vol. 32, no. 2, pp. 95-108, 2008, ISSN: 0895-6111.
Abstract | Links | BibTeX | Tags: Classification, Classifier combination, Content-based image retrieval, Inverted file, Medical imaging, Similarity fusion, Support vector machine
@article{RAHMAN200895,
title = {Medical image retrieval with probabilistic multi-class support vector machine classifiers and adaptive similarity fusion},
author = {Md. Mahmudur Rahman and Bipin C Desai and Prabir Bhattacharya},
url = {https://www.sciencedirect.com/science/article/pii/S0895611107001383},
doi = {10.1016/j.compmedimag.2007.10.001},
issn = {0895-6111},
year = {2008},
date = {2008-01-01},
journal = {Computerized Medical Imaging and Graphics},
volume = {32},
number = {2},
pages = {95-108},
abstract = {We present a content-based image retrieval framework for diverse collections of medical images of different modalities, anatomical regions, acquisition views, and biological systems. For the image representation, the probabilistic outputs of multi-class support vector machines (SVMs), with low-level features as inputs, are represented as a vector of confidence or membership scores for pre-defined image categories. The outputs are combined for feature-level fusion and retrieval based on combination rules derived from Bayes' theorem. We also propose an adaptive similarity fusion approach based on a linear combination of individual feature-level similarities. The feature weights are calculated by considering both the precision and the rank order information of the top retrieved relevant images as predicted by the SVMs. The weights are dynamically updated by the system for each individual search to produce effective results. The experiments and analysis of the results are based on a diverse medical image collection of 11,000 images of 116 categories. The performances of the classification and retrieval algorithms are evaluated both in terms of error rate and precision–recall. Our results demonstrate the effectiveness of the proposed framework as compared to the commonly used approaches based on low-level feature descriptors.},
keywords = {Classification, Classifier combination, Content-based image retrieval, Inverted file, Medical imaging, Similarity fusion, Support vector machine},
pubstate = {published},
tppubtype = {article}
}
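To make the adaptive weighting idea in the abstract above concrete, the following sketch recomputes each feature's weight from the precision and rank positions of the relevant images in that feature's own top-K results, then normalises the weights across features. The specific formula is an assumption for illustration; the paper defines its own weighting scheme.

# Hypothetical per-search feature re-weighting from precision and rank order.
import numpy as np

def feature_weight(relevant_flags):
    """relevant_flags: booleans for the feature's top-K results, best rank first."""
    flags = np.asarray(relevant_flags, dtype=float)
    precision = flags.mean()
    ranks = np.arange(1, len(flags) + 1)
    rank_score = (flags / ranks).sum() / (1.0 / ranks).sum()   # rewards early hits
    return precision * rank_score

def update_weights(per_feature_flags):
    """Normalise raw per-feature scores into weights that sum to one."""
    raw = {f: feature_weight(v) for f, v in per_feature_flags.items()}
    total = sum(raw.values()) or 1.0
    return {f: w / total for f, w in raw.items()}

# Relevance of the top-5 results returned by each individual feature
feedback = {
    "concept_histogram": [True, True, False, True, False],
    "edge_histogram":    [False, False, True, False, False],
}
print(update_weights(feedback))   # higher weight for the better-performing feature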