% Cleaned bibliography database.
% Key changes from the raw export: the first entry's key contained a space and a
% period (invalid) and was renamed; the two 2020 Gurses/Oktay conference papers
% shared the key "gurses_oktay_2020" (a BibTeX "repeated entry" error) and are
% now disambiguated as gurses_oktay_2020a / gurses_oktay_2020b. Update any
% existing \cite commands accordingly.

@inproceedings{gurses_funderburk_2023,
  author    = {Gurses, Anil and Funderburk, Mark and Kesler, John and Powell, Keith and Rahman, Talha F. and Ozdemir, Ozgur and Mushi, Magreth and Sichitiu, Mihail L. and Guvenc, Ismail and Dutta, Rudra and others},
  title     = {Demonstration of Joint {SDR}/{UAV} Experiment Development in {AERPAW}},
  booktitle = {MILCOM 2023 - 2023 IEEE Military Communications Conference},
  year      = {2023},
  issn      = {2155-7578},
  doi       = {10.1109/MILCOM58377.2023.10356351},
  abstract  = {The Aerial Experimentation and Research Platform for Advanced Wireless (AERPAW) is an outdoor testbed providing the experimenters access to programmable radios and programmable vehicles. A key aspect of AERPAW is its experiment development environment. This demo introduces potential users to the main capabilities of AERPAW's development environment. The demo exercises the main three flexible testbed capabilities, namely the ability of an experimenter to choose a wireless radio setup, a vehicle setup, and to set up traffic. The experiment is then executed live, and the collected data is post-processed and displayed.},
}

@inproceedings{drago_gurses_heath_sichitiu_zorzi_2023,
  author    = {Drago, Matteo and Gurses, Anil and Heath, Jr., Robert W. and Sichitiu, Mihail L. and Zorzi, Michele},
  title     = {End-to-end Full-Stack Drone Measurements: A Case Study Using {AERPAW}},
  booktitle = {2023 IEEE International Conference on Communications Workshops (ICC Workshops)},
  year      = {2023},
  pages     = {1422--1427},
  issn      = {2164-7038},
  doi       = {10.1109/ICCWORKSHOPS57953.2023.10283735},
  abstract  = {While a lot of studies have been made to include drone communications in the 5th Generation of Mobile Networks (5G), it is still arguable how reliably current air-to-ground infrastructures can perform. To apply a further boost to this research direction, the National Science Foundation (NSF) recently funded the Aerial Experimentation Research Platform for Advanced Wireless (AERPAW) for the creation of a high-end publicly available testbed. Considering the current lack in the literature of experimental studies carried out with open testbeds, in this paper we target two contributions. First, we use AERPAW for the end-to-end evaluation of the performance of an emulated Uplink (UL) traffic between an Unmanned Aerial Vehicle (UAV) and a Fixed Node (FN), connected through an open source LTE network software (srsRAN). Second, in addition to providing a thorough analysis of the results obtained from our experiments, we made our testbed's configuration files and collected dataset available to the public, to provide a reference for future research on UAV communication, enabled by AERPAW.},
}

@article{oktay_akhtar_gurses_2022,
  author  = {Oktay, A. B. and Akhtar, Z. and Gurses, A.},
  title   = {Dental biometric systems: a comparative study of conventional descriptors and deep learning-based features},
  journal = {Multimedia Tools and Applications},
  year    = {2022},
  volume  = {81},
  number  = {20},
  pages   = {28183--28206},
  doi     = {10.1007/s11042-022-12019-7},
  url     = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85127381173&partnerID=MN8TOARS},
}

@incollection{oktay_gurses_2021,
  author    = {Oktay, Ayse Betul and Gurses, An{\i}l},
  title     = {Detection, segmentation, and numbering of teeth in dental panoramic images with mask regions with convolutional neural network features},
  booktitle = {State of the Art in Neural Networks and their Applications},
  publisher = {Elsevier},
  year      = {2021},
  pages     = {73--90},
  doi       = {10.1016/b978-0-12-819740-0.00004-8},
  abstract  = {Dental image analysis is important for orthodontics, forensics, and dental treatments like cavity restoration or implants. In order to build a computer-aided diagnosis system for dental analysis, localization and numbering of teeth are crucial. In this study, we propose to use a popular deep learning technique, Mask regions with convolutional neural network features (RCNN), for simultaneous detection, segmentation, and numbering of teeth in panoramic X-ray images. Multiclass labeling is performed by Mask RCNN by giving a unique class name to each tooth type. After classification, postprocessing is performed for numbering teeth according to detected labels and dental chart. The proposed method is trained on 200 images and tested on 278 panoramic dental images. The average tooth detection accuracy is 0.98, and F1 score for segmentation is 0.93.},
}

@inproceedings{gurses_oktay_2020a,
  author    = {Gurses, Anil and Oktay, Ayse Betul},
  title     = {Human Identification with Panoramic Dental Images using {Mask R-CNN} and {SURF}},
  booktitle = {2020 5th International Conference on Computer Science and Engineering (UBMK)},
  publisher = {IEEE},
  year      = {2020},
  month     = sep,
  pages     = {232--237},
  doi       = {10.1109/ubmk50275.2020.9219513},
  abstract  = {There exist unidentified bodies after natural disasters, terrorist attacks, and accidents. DNA, fingerprint, and dental information are biometrics that is commonly used by forensic experts to identify the dead bodies. The dental image of an unidentified body is compared with the dental image records taken before death to determine whether both dental images correspond to the same individual. In this study, a new method is presented for human identification with dental biometrics using panoramic X-ray images. The proposed method employs Mask R-CNN and Speeded up Robust Features (SURF) for matching the dental images. The method follows a tooth-wise matching approach that evaluates the correspondence of each tooth individually. The teeth in dental images are segmented and classified with Mask R-CNN. The SURF key points of each tooth in the unidentified dental image are compared with the teeth in the database containing previously captured dental images. The final identification is performed by bringing the tooth-wise scores together. Mask R-CNN is tested on 8896 tooth images and human identification with SURF is tested on 102 panoramic dental images of 51 different individuals taken at different times. The experimental results show promising outcomes with a matching percentage of 80.39\% at first rank.},
}

@inproceedings{gurses_oktay_2020b,
  author    = {Gurses, Anil and Oktay, Ayse Betul},
  title     = {Tooth Restoration and Dental Work Detection on Panoramic Dental Images via {CNN}},
  booktitle = {2020 Medical Technologies Congress (TIPTEKNO)},
  publisher = {IEEE},
  year      = {2020},
  month     = nov,
  doi       = {10.1109/tiptekno50054.2020.9299272},
  abstract  = {Automatic detection of dental work and type of restorations plays an important role for human identification and creation of reports for dental treatment at clinics. In this study, we employed three state-of-the-art convolutional neural networks (CNNs), which are GoogleNet, DenseNet and ResNet, for classification of dental restorations. Implants, canal root treatments, amalgam and composite fillings, dental braces and unrestored teeth are the classes that are detected by the networks. The CNNs are validated on a dataset including 3013 tooth images. DenseNet has 94\% accuracy which is the highest accuracy among three CNN architectures. Dental braces and implants are detected with more accuracy than other dental work.},
}

@article{oktay_gurses_2019,
  author   = {Oktay, A. B. and Gurses, A.},
  title    = {Automatic detection, localization and segmentation of nano-particles with deep learning in microscopy images},
  journal  = {Micron},
  year     = {2019},
  volume   = {120},
  pages    = {113--119},
  doi      = {10.1016/j.micron.2019.02.009},
  url      = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85062258030&partnerID=MN8TOARS},
  abstract = {With the growing amount of high resolution microscopy images automatic nano-particle detection, shape analysis and size determination have gained importance for providing quantitative support that gives important information for the evaluation of the material. In this paper, we present a new method for detection of nano-particles and determination of their shapes and sizes simultaneously with deep learning. The proposed method employs multiple output convolutional neural networks (MO-CNN) and has two outputs: first is the detection output that gives the locations of the particles and the other one is the segmentation output for providing the boundaries of the nano-particles. The final sizes of particles are determined with the modified Hough algorithm that runs on the segmentation output. The proposed method is tested and evaluated on a dataset containing 17 TEM images of Fe3O4 and silica coated nano-particles. Also, we compared these results with U-net algorithm which is a popular deep learning method. The experiments showed that the proposed method has 98.23\% accuracy for detection and 96.59\% accuracy for segmentation of nano-particles.},
}