Text outside @...{...} entries is ignored by BibTeX, so these notes are safe.

NOTE(review): several given names appear truncated by the exporting system
(e.g. "Rez", "Yaz", "mohse", "Masou", "Gholamrez"); they are kept verbatim
below -- verify against the publisher pages before correcting them.
The "eprint" fields hold publisher PDF links (non-standard use of the field,
preserved from the original export; standard styles ignore unknown fields).

@string{ejgit = {Engineering Journal of Geospatial Information Technology}}

@article{abolhoseini2021modified,
  author    = {Abolhoseini, Sina and Mesgari, Saadi Mohammad and MohammadiSoleimani, Rez},
  title     = {Modified particle swarm optimization algorithm to solve location problems on urban transportation networks (Case study: Locating traffic police kiosks)},
  journal   = ejgit,
  year      = {2021},
  volume    = {8},
  number    = {3},
  pages     = {1--19},
  publisher = {kntu},
  issn      = {2008-9635},
  doi       = {10.52547/jgit.8.3.1},
  url       = {http://jgit.kntu.ac.ir/article-1-577-en.html},
  eprint    = {http://jgit.kntu.ac.ir/article-1-577-en.pdf},
  keywords  = {Location Problem, Traffic Police Kiosk, Particle Swarm Optimization, Artificial Bee Colony, Urban Transportation Network},
  abstract  = {Nowadays, traffic congestion is a big problem in metropolises all around the world. Traffic problems rise with the rise of population and slow growth of urban transportation systems. Car accidents or population concentration in particular places due to urban events can cause traffic congestions. Such traffic problems require the direct involvement of the traffic police, and it is urgent for them to be present at the scene as soon as possible. Due to the shortage of space, constructing traffic police centers in all areas is not possible. As a result, building traffic police kiosks with limited number of personnel and small cabins is a solution to solve this problem. Finding suitable places to build kiosks is a location optimization problem that can be solved by geospatial analyses. Artificial intelligent algorithms are suitable approaches to solve such problems. Particle Swarm Optimization (PSO) algorithm proved to be a fast and exact algorithm in solving continuous space problems. However, this algorithm cannot be used for discrete space problems without any modifications. In this paper, we modified PSO to solve problems in combinatorial space. Crossover and mutation operators from Genetic Algorithm were used to modify the behavior of particles. After conducting experiments on a part of Tehran’s transportation network, results were compared to the results of Artificial Bee Colony algorithm. In experiments with 2 and 4 kiosks, both algorithms are performing the same in accuracy, stability, convergence trend, and computation time. But in experiments with 10 kiosks on a bigger environment, results are in favor of the modified PSO algorithm in obtaining the optimum value; stability and better distribution in the area of interest. Results indicate that the proposed algorithm, is capable of solving combinatorial problems in a fast and accurate manner.},
}

@article{omati2021separation,
  author    = {Omati, Mehrnoosh and Sahebi, Mahmod Reza and Amerian, Yaz},
  title     = {Separation Between Anomalous Targets and Background Based on the Decomposition of Reduced Dimension Hyperspectral Image},
  journal   = ejgit,
  year      = {2021},
  volume    = {8},
  number    = {3},
  pages     = {21--38},
  publisher = {kntu},
  issn      = {2008-9635},
  doi       = {10.52547/jgit.8.3.21},
  url       = {http://jgit.kntu.ac.ir/article-1-660-en.html},
  eprint    = {http://jgit.kntu.ac.ir/article-1-660-en.pdf},
  keywords  = {Anomaly Detection, Dimension Reduction and Decomposition of Hyperspectral Image, Low-rank Background Matrix, Sparse Anomaly Matrix},
  abstract  = {The application of anomaly detection has been given a special place among the different processings of hyperspectral images. Nowadays, many of the methods only use background information to detect between anomaly pixels and background. Due to noise and the presence of anomaly pixels in the background, the assumption of the specific statistical distribution of the background, as well as the correlation between bands of hyperspectral images, leads to increase false alarms and the limitation of the presented methods in detecting anomalies. The purpose of this paper is to propose a new method for detecting anomalies with the ability to remove the limitations in background space. In the proposed method, first, the Fast Fourier Transform (FFT) is applied on the image as a preprocess of anomaly detection algorithms. Using this linear dimension reduction technique, in addition to improving the performance of the detection algorithm, can significantly reduce the calculation. Then, by decomposition of reduced dimension hyperspectral image to the low-rank background matrix and the anomaly sparse matrix, in addition to separation of the noise from the signals in the image, both the background and anomaly components can be used to extract information. In fact, by separating the component of the anomaly from the background, the effect of the existence of anomalous pixels in the background is reduced and only the low-rank matrix is used to extract information and statistical characteristics. Also, using the weighted average Mahalanobis distance based on the median criterion in the proposed decomposition method, we can allocate a background corresponding weight to each pixel and improve the anomalies detection results. The implementation of the proposed algorithm on the Pavia Hyperspectral Image and comparing its results with other common methods showed better performance of the proposed technique in detecting anomaly pixels from the background space.},
}

@article{fallah2021fusion,
  author    = {Fallah, Mohammad and Azadbakht, Mohse},
  title     = {Fusion of Thermal Infrared and Visible Images Based on Multi-scale Transform and Sparse Representation},
  journal   = ejgit,
  year      = {2021},
  volume    = {8},
  number    = {3},
  pages     = {39--59},
  publisher = {kntu},
  issn      = {2008-9635},
  doi       = {10.52547/jgit.8.3.39},
  url       = {http://jgit.kntu.ac.ir/article-1-582-en.html},
  eprint    = {http://jgit.kntu.ac.ir/article-1-582-en.pdf},
  keywords  = {Visible image, Thermal infrared image, Image fusion, Multi-scale transform, Sparse representation},
  abstract  = {Due to the differences between the visible and thermal infrared images, combination of these two types of images is essential for better understanding the characteristics of targets and the environment. Thermal infrared images have most importance to distinguish targets from the background based on the radiation differences, which work well in all-weather and day/night conditions also in land surface temperature (LST) calculation. However, their spatial resolution is relatively low, making it challenging to detect targets. Image fusion is an efficient method that is employed to enhance spatial resolution of the thermal bands through fusing these images with high spatial resolution visible images. Therefore, it is desirable to fuse these two types of images, which can combine the advantages of both the thermal radiation information and detailed spatial information. Multi-scale transforms (MST) and sparse representation (SR) are widely used in image fusion. To improve the performance of image fusion, these two types of methods can be combined. In this regard, an MST is firstly performed on each of the preregistered source images to obtain their low-pass and high-pass coefficients. Then, the low-pass images are combined with a SR-based fusion approach while the high-pass images are fused using the absolute values of the coefficients. The fused image is finally obtained by performing an inverse MST on the merged coefficients. In this paper, nine image fusion methods based on the multi-scale transform and sparse representation, namely Laplacian pyramid (LP), ratio of low-pass pyramid (RP), wavelet transform (Wavelet), dual-tree complex wavelet transform (DTCWT), curvelet transform (CVT), nonsubsampled contourlet transform (NSCT), sparse representation (SR), hybrid sparse representation and Laplacian pyramid methods (LP-SR) and hybrid sparse representation and NSCT methods (NSCT-SR) are tested on FLIR and landsat-8 thermal infrared and visible images. To evaluate the performance of different image fusion methods we use three quantitative evaluation metrics: entropy (EN), mutual information (MI), and gradient based fusion metric (QAB/F). Despite the lack of spectral coverage between the visible and thermal infrared bands of Landsat 8, quantitative evaluation metrics showed that the hybrid LP-SR method provides the best result (EN=7.362, MI=2.605, QAB/F =0.531) and fused images have best visual quality. This method improve spatial details while preserving the thermal radiation information. It followed by RP, LP, and NSCT methods. Similar results were achieved in FLIR images.},
}

@article{papi2021modeling,
  author    = {Papi, Ramin and Argany, Meysam and Moradipour, Shahab and Soleimani, Masou},
  title     = {Modeling the potential of Sand and Dust Storm sources formation using time series of remote sensing data, fuzzy logic and artificial neural network (A Case study of Euphrates basin)},
  journal   = ejgit,
  year      = {2021},
  volume    = {8},
  number    = {3},
  pages     = {61--82},
  publisher = {kntu},
  issn      = {2008-9635},
  doi       = {10.52547/jgit.8.3.61},
  url       = {http://jgit.kntu.ac.ir/article-1-737-en.html},
  eprint    = {http://jgit.kntu.ac.ir/article-1-737-en.pdf},
  keywords  = {Sand and Dust Storm (SDS), Remote Sensing, Time Series, Artificial Neural Network, Euphrates Basin},
  abstract  = {Sand and Dust Storms (SDS) are known as one of the most common environmental problems in arid and semi-arid regions of the world. This phenomenon is harmful to human health as well as to economy. Over the past two decades, SDS have been increasing on a local, regional and even global scale. The Euphrates Basin is recognized as one of the most active SDS sources in the world. The first step in managing this environmental phenomenon, is to identify dust storm sources. The aim of this study is mapping the potential sources of SDS in the Euphrates basin by using Multi-Layer Perceptron Neural Network. In the first step, the long-term time series of which is data, related to key environmental parameters affecting the occurrence of SDS including: soil moisture, soil temperature, soil texture, land surface temperature, wind speed, precipitation, evapotranspiration, dusty months, land use population, pressure, the identified elevation and slope were used as artificial neural network model inputs. Using the visual interpretation of 2500 MODIS images in natural color composite, 190 SDS centers were identified visually and introduced to the neural network as training points. 70% of the points (133 points) and 30% of them (57 points) were used for training, testing and validation of model, respectively. After running the model, the estimated mean squared error (MSE) was equal to 0.1, which indicats acceptable accuracy of the neural network model in mapping the potential SDS sources. The results show that, 147000 km2 of the basin is prone to the formation of SDS sources, which mainly include low rainfall, dry and barren areas of the basin.},
}

@article{golmohamadi2021statistical,
  author    = {Golmohamadi, Mehdi and Joodaki, Gholamrez},
  title     = {Statistical downscaling of {GRACE} gravity satellite-derived groundwater level data},
  journal   = ejgit,
  year      = {2021},
  volume    = {8},
  number    = {3},
  pages     = {83--101},
  publisher = {kntu},
  issn      = {2008-9635},
  doi       = {10.52547/jgit.8.3.83},
  url       = {http://jgit.kntu.ac.ir/article-1-779-en.html},
  eprint    = {http://jgit.kntu.ac.ir/article-1-779-en.pdf},
  keywords  = {GRACE, Statistical downscaling, Groundwater level changes, Water resource management},
  abstract  = {With the continued threat from climate change, population growth and followed by increasing water demand, the need for hydrological data with high spatial resolution and proper time coverage to be felt more than ago. Therefore, having data such as terrestrial water storage changes and groundwater level changes with high resolution spatial helps to plan and make decisions for water resource management more efficiently. Since the beginning of the GRACE mission, evaluation of water resources, especially groundwater level changes has been provided at a global scale. Despite the wide coverage area, due to the low spatial resolution and large pixel size (~200,000 km2), the use of GRACE data for local and smaller scales, it isn’t possible. Therefore, the purpose of this research is the feasibility of downscaling GRACE data to small and local scale. In this study, used from an empirical regression method based on the relationship between GRACE and other hydrological data and created groundwater level changes data with 0.25 degree gridded. Finally, used from groundwater level changes data derived from monitoring wells to validate downscaled groundwater level changes, where RMSE value between 38.17 mm to 56.4 mm, and R2 between 0.49 to 0.54 were obtained. Therefore, this method can improve GRACE data resolution from 1° to 0.25°, effectively.},
}

@article{atighi2021target,
  author    = {Atighi, Fateme and Safdarinezhad, Alireza and Karimi, Rohollah},
  title     = {Target Detection Improvements in Hyperspectral Images by Adjusting Band Weights and Identifying end-members in Feature Space Clusters},
  journal   = ejgit,
  year      = {2021},
  volume    = {8},
  number    = {3},
  pages     = {103--122},
  publisher = {kntu},
  issn      = {2008-9635},
  doi       = {10.52547/jgit.8.3.103},
  url       = {http://jgit.kntu.ac.ir/article-1-733-en.html},
  eprint    = {http://jgit.kntu.ac.ir/article-1-733-en.pdf},
  keywords  = {Hyperspectral imaging, Target detection, Variance Component Estimation (VCE), Spectral weighting, Spectral un-mixing},
  abstract  = {Spectral target detection could be regarded as one of the strategic applications of hyperspectral data analysis. The presence of targets in an area smaller than a pixel’s ground coverage has led to the development of spectral un-mixing methods to detect these types of targets. Usually, in the spectral un-mixing algorithms, the similar weights have been assumed for spectral bands. However, the various uncertainties such as the different effects of the atmospheric conditions and the relative radiometric calibration of the sensor lead to differentiations data recorded in each band. So, the Modification of the weights of the spectral bands is the first objective of this paper in order to improve the accuracy of target detection in the spectral un-mixing process. Considering the complexities of direct estimation of the band weights, an algorithm based on the Variance Component Estimation (VCE) is proposed to optimize the weights of the spectral bands. On the other hand, in addition to the availability of target spectrums, the spectral response of the backgrounds is a necessity to perform reliable target detection. The unsupervised detection of the background endmembers is known as the popular way of doing that. The second contribution of this paper is the proposal of cluster-based background detection to be used in the target detection process. It prevents the presence of the unrelated endmembers in each cluster which has improved the spectral un-mixing for target detection. The proposed methods have been implemented in the target detectors of Unconstrained Linear Spectral Un-mixing (UCLSU), Sum to one Constrained Linear Spectral Un-mixing (SCLSU), Non-negativity Constrained Linear Spectral Un-mixing (NCLSU), and Fully Constrained Linear Spectral Un-mixing (FCLSU). The results indicate their success in the improvement of the target detection accuracies. Considering the best choice on the number of spectral clusters and the number of background endmembers, accuracy improvement of up to 17 percent in the target detection has occurred.},
}