I want to calculate the NDVI from a Sentinel-2 image.
import os
import numpy as np
import rasterio as rio

# suppress numpy's true-divide warnings for division by zero
np.seterr(divide='ignore', invalid='ignore')

red_f = 'absolute/path/to/band/4'  # placeholder path to band 4 (red)
nir_f = 'absolute/path/to/band/8'  # placeholder path to band 8 (NIR)

def calc_ndvi():
    with rio.open(red_f) as src:
        red = src.read()
    red = red.astype(np.float64)
    with rio.open(nir_f) as src:
        nir = src.read()
    nir = red.astype(np.float64)
    ndvi = np.divide((nir - red), (nir + red))
    return ndvi

ndvi = calc_ndvi()
The 'red' and 'nir' arrays are originally loaded as uint16 with a shape of (1, 10980, 10980). I convert them to float before the calculation using astype. As far as I know it's not necessary to flatten the arrays to a 2D shape; I have tried that, but it didn't help.
Unfortunately, the result is an array completely filled with zeros.
What am I doing wrong?
You have a typo:
nir = red.astype(np.float64)
Should be:
nir = nir.astype(np.float64)
In:
ndvi = np.divide((nir - red),(nir + red))
You are really doing:
ndvi = np.divide((red - red),(red + red))
Which results in an array of zeros.
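With the typo fixed, the whole function reads like this (the band paths are still the placeholders from your question; np.divide produces inf/NaN where nir + red is 0, which the seterr call at the top already silences):

def calc_ndvi():
    with rio.open(red_f) as src:
        red = src.read().astype(np.float64)
    with rio.open(nir_f) as src:
        nir = src.read().astype(np.float64)  # read and convert nir itself
    return np.divide(nir - red, nir + red)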
Hi, I use OpenCV in Python to count palm trees in my project. I use the code below, but it only works for simple pictures like coins. My images show palm trees, and with this code the count is wrong: it counts 2 palm trees while the real number is about 100. How can I fix this and solve my problem?
Thanks.
The code:
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Read image
I = cv2.imread('drops.jpg', 0)

# Threshold
IThresh = (I >= 118).astype(np.uint8) * 255

# Remove the biggest connected component from the image
# Find the area of each connected component
connectedComponentProps = cv2.connectedComponentsWithStats(IThresh, 8, cv2.CV_32S)
IThreshOnlyInsideDrops = np.zeros_like(connectedComponentProps[1])
IThreshOnlyInsideDrops = connectedComponentProps[1]
stat = connectedComponentProps[2]
maxArea = 0
for label in range(connectedComponentProps[0]):
    cc = stat[label, :]
    if cc[cv2.CC_STAT_AREA] > maxArea:
        maxArea = cc[cv2.CC_STAT_AREA]
        maxIndex = label

# Convert the background value to the foreground value
for label in range(connectedComponentProps[0]):
    cc = stat[label, :]
    if cc[cv2.CC_STAT_AREA] == maxArea:
        IThreshOnlyInsideDrops[IThreshOnlyInsideDrops == label] = 0
    else:
        IThreshOnlyInsideDrops[IThreshOnlyInsideDrops == label] = 255

# Fill in all the IThreshOnlyInsideDrops as 0 in the original IThresh
IThreshFill = IThresh
IThreshFill[IThreshOnlyInsideDrops == 255] = 0
IThreshFill = np.logical_not(IThreshFill / 255).astype(np.uint8) * 255
plt.imshow(IThreshFill)

# Get number of drops and cover percentage
connectedComponentPropsFinal = cv2.connectedComponentsWithStats(IThreshFill, 8, cv2.CV_32S)
NumberOfDrops = connectedComponentPropsFinal[0]
CoverPercentage = float(np.count_nonzero(IThreshFill == 0) / float(IThreshFill.size))

# Print
print("Number of drops = " + str(NumberOfDrops))
print("Cover percentage = " + str(CoverPercentage))
I want to count palm trees and have tried different pieces of code, but without result.
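For reference, this minimal check (the image path is a placeholder) shows how many connected components the fixed threshold of 118 actually produces and how large they are; if neighbouring canopies touch, they merge into a few huge blobs, which would explain a count of 2 instead of about 100:

import cv2
import numpy as np

I = cv2.imread('palms.jpg', 0)  # hypothetical path to the palm image
IThresh = (I >= 118).astype(np.uint8) * 255
nLabels, labels, stats, centroids = cv2.connectedComponentsWithStats(IThresh, 8, cv2.CV_32S)
print("components found:", nLabels - 1)  # label 0 is the background
print("component areas:", sorted(stats[1:, cv2.CC_STAT_AREA]))  # a few very large areas => merged trees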
I use dlib.train_simple_object_detector to create a detector for steel bars in a bundle. This is my sample:
It has the SAME box for every bar (created via Duplicate RectBox in labelImg), and the box size is 122x118 (14396 in area).
This is my training code:
import dlib
import cv2.cv2 as cv2
import os
import time
import sys
from xml.dom import minidom

if len(sys.argv) != 4:
    print("Usage: python train.py /path/to/images/ /path/to/boxes/ /path/to/result.svm")
    print("Images and boxes are named like 1.jpg and 1.xml")
    exit(1)

data = {}
image_indexes = [int(img_name.split(".")[0]) for img_name in os.listdir(sys.argv[1])]
# np.random.shuffle(image_indexes)
image_indexes.sort()

# parse rectangle data
for index in image_indexes:
    if index in [0]:
        continue
    rects = minidom.parse("{}/{}.xml".format(sys.argv[2], index)).getElementsByTagName("bndbox")
    img = cv2.imread(os.path.join(sys.argv[1], str(index) + ".jpg"))
    for rect in rects:
        xmin = int(rect.getElementsByTagName("xmin")[0].firstChild.data)
        xmax = int(rect.getElementsByTagName("xmax")[0].firstChild.data)
        ymin = int(rect.getElementsByTagName("ymin")[0].firstChild.data)
        ymax = int(rect.getElementsByTagName("ymax")[0].firstChild.data)
        dlib_box = dlib.rectangle(left=xmin, top=ymin, right=xmax, bottom=ymax)
        if index in data:
            data[index][1].append(dlib_box)
        else:
            data[index] = (img, [dlib_box])

# train
percent = 0.8
split = int(len(data) * percent)
images = [tuple_value[0] for tuple_value in data.values()]
bounding_boxes = [tuple_value[1] for tuple_value in data.values()]

options = dlib.simple_object_detector_training_options()
options.add_left_right_image_flips = False
options.C = 5
options.num_threads = 16
options.epsilon = 0.01
# options.be_verbose = True

st = time.time()
detector = dlib.train_simple_object_detector(images[:split], bounding_boxes[:split], options)
print("Training complete. Time taken: {:.2f} seconds.".format(time.time() - st))
print("Training Metrics: {}".format(dlib.test_simple_object_detector(images[:split], bounding_boxes[:split], detector)))
detector.save(sys.argv[3])
When I run it with this sample it gives an error:
Error! An impossible set of object boxes was given for training. All the boxes
need to have a similar aspect ratio and also not be smaller than about 400
pixels in area.
But that's not true: the boxes definitely have the same aspect ratio, since they are identical, and their area is well over 400 pixels (about 14000, actually). Why does this happen?
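To double-check what the trainer actually receives, the parsed boxes can be dumped right before the dlib.train_simple_object_detector call (a quick check using the data dict built above; it should show identical widths and heights, and areas far above 400):

for index, (_, boxes) in data.items():
    for b in boxes:
        print(index, b.left(), b.top(), b.right(), b.bottom(),
              b.width(), b.height(), b.area())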
I am trying to convert netCDF files to EPSG:3857 for use with Mapbox by using GDAL. This would be an .nc-to-.nc conversion, not a conversion to raster. I am open to using GDAL or other methods to do this. The data must be reprojected before it goes to a console app, and finding a solution for this step has taken weeks; I figured it would be simple.
I am working on colorizing satellite data. There are 3 .nc files (blue, red, and infrared) that, when combined and processed, create a color image. After the 3 files are downloaded (from Amazon AWS), a Python console app does the processing and dumps a .jpg to the same folder. The source code for that application is located here so you may validate the data. (It is slow, as the files are super high resolution.)
The code I have tried is:
gdalwarp -t_srs EPSG:3857 test.nc test-projected.nc
However, several other variations have been tried and nothing works.
I am not a professional with this, but should I even be using gdalwarp to do this? I only want to change the projection - nothing else, so the python app can still work with the data. It must be able to create the .jpg using the reprojected files.
The following links are samples of the data that needs to be converted:
.nc file on AWS > Color Channel 1 (Blue 1km resolution)
.nc file on AWS > Color Channel 2 (Red, Higher 0.5km resolution & larger file size)
.nc file on AWS > Color Channel 3 (Infrared - serves as green)
Additionally, someone else online has accomplished this using a similar projection via the pyproj module at https://github.com/blaylockbk/pyBKB_v2/tree/master/BB_GOES16. (Mine must be EPSG:3857 for use with Mapbox.) If the Python code were modified to do this all in one go, that would be great too. I am opening a bounty as a final hope.
I do not know Python, so I have been attempting GDAL for the most part; however, working Python code added to my source code that achieves the expected result (or a working GDAL script) will earn the bounty.
Here is my solution:
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 4 17:39:45 2019
#author: Guy Serbin
"""
import os, sys, glob, argparse
from osgeo import gdal, osr
from scipy.misc import imresize
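# Note: scipy.misc.imresize was deprecated in SciPy 1.0 and removed in SciPy 1.3,
# so this script assumes an older SciPy install (or an equivalent resize function).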
parser = argparse.ArgumentParser(description = 'Script to create CONUS true color image from GOES 16 L1b data.')
parser.add_argument('-i', '--indir', type = str, default = r'C:\Data\Freelancer\DavidHolcomb', help = 'Input directory name.')
parser.add_argument('-o', '--outdir', type = str, default = None, help = 'Output directory name.')
parser.add_argument('-p', '--proj', type = int, default = 3857, help = 'Output projection, must be EPSG number.')
args = parser.parse_args()
if not args.indir:
    print('ERROR: --indir not set. exiting.')
    sys.exit()
elif not os.path.isdir(args.indir):
    print('ERROR: --indir not set to a valid directory path. exiting.')
    sys.exit()

if not args.outdir:
    print('WARNING: --outdir not set. Output will be written to --indir.')
    args.outdir = args.indir
o_srs = osr.SpatialReference()
o_srs.ImportFromEPSG(args.proj)
# based upon code ripped from https://riptutorial.com/gdal/example/25859/read-a-netcdf-file---nc--with-python-gdal
# Path of netCDF file
netcdf_red = glob.glob(os.path.join(args.indir, 'OR_ABI-L1b-RadC-M3C02_G16_s*.nc'))[0]
netcdf_green = glob.glob(os.path.join(args.indir, 'OR_ABI-L1b-RadC-M3C03_G16_s*.nc'))[0]
netcdf_blue = glob.glob(os.path.join(args.indir, 'OR_ABI-L1b-RadC-M3C01_G16_s*.nc'))[0]
baselist = os.path.basename(netcdf_blue).split('_')
outputfilename = os.path.join(args.outdir, 'OR_ABI-L1b-RadC-M3TrueColor_1_G16_{}.tif'.format(baselist[3]))
print('Output file will be: {}'.format(outputfilename))
tempfile = os.path.join(args.outdir, 'temp.tif')
# Specify the layer name to read
layer_name = "Rad"
# Open netcdf file.nc with gdal
print('Opening red band file: {}'.format(netcdf_red))
dsR = gdal.Open("NETCDF:{0}:{1}".format(netcdf_red, layer_name))
print('Opening green band file: {}'.format(netcdf_green))
dsG = gdal.Open("NETCDF:{0}:{1}".format(netcdf_green, layer_name))
print('Opening blue band file: {}'.format(netcdf_blue))
dsB = gdal.Open("NETCDF:{0}:{1}".format(netcdf_blue, layer_name))
red_srs = osr.SpatialReference()
red_srs.ImportFromWkt(dsR.GetProjectionRef())
i_srs = osr.SpatialReference()
i_srs.ImportFromWkt(dsG.GetProjectionRef())
GeoT = dsG.GetGeoTransform()
print(i_srs.ExportToWkt())
red_transform = osr.CoordinateTransformation(red_srs, o_srs)
transform = osr.CoordinateTransformation(i_srs, o_srs)
# Read full data from netcdf
print('Reading red band into memory.')
red = dsR.ReadAsArray(0, 0, dsR.RasterXSize, dsR.RasterYSize)
print('Resizing red band to match green and blue bands.')
red = imresize(red, 50, interp = 'bicubic')
print('Reading green band into memory.')
green = dsG.ReadAsArray(0, 0, dsG.RasterXSize, dsG.RasterYSize)
print('Reading blue band into memory.')
blue = dsB.ReadAsArray(0, 0, dsB.RasterXSize, dsB.RasterYSize)
red[red < 0] = 0
green[green < 0] = 0
blue[blue < 0] = 0
# Stack data and output
print('Stacking data.')
driver = gdal.GetDriverByName('GTiff')
stack = driver.Create('/vsimem/stack.tif', dsB.RasterXSize, dsB.RasterYSize, 3, gdal.GDT_Int16)
stack.SetProjection(i_srs.ExportToWkt())
stack.SetGeoTransform(GeoT)
stack.GetRasterBand(1).WriteArray(red)
stack.GetRasterBand(2).WriteArray(green)
stack.GetRasterBand(3).WriteArray(blue)
print('Warping data to new projection.')
warped = gdal.Warp('/vsimem/warped.tif', stack, dstSRS = o_srs, outputType = gdal.GDT_Int16)
print('Writing output to disk.')
outRaster = gdal.Translate(outputfilename, '/vsimem/warped.tif')
outRaster = None
red = None
green = None
blue = None
tmp_ds = None
dsR = None
dsG = None
dsB = None
print('Processing complete.')
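Assuming the script is saved as, say, goes_truecolor.py (the name and paths here are only placeholders), it can be run with the options defined in the argparse block above:

python goes_truecolor.py -i C:\path\to\nc\files -o C:\path\to\output -p 3857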
You can use rioxarray to do this. An example of doing so is here: https://corteva.github.io/rioxarray/html/examples/reproject.html
Here is an example targeted for your use case:
import rioxarray
xds = rioxarray.open_rasterio("OR_ABI-L1b-RadC-M3C01_G16_s20190621802131_e20190621804504_c20190621804546.nc")
<xarray.Dataset>
Dimensions:      (band: 1, x: 5000, y: 3000)
Coordinates:
  * y            (y) float64 1.584e+06 1.585e+06 ... 4.588e+06 4.589e+06
  * x            (x) float64 -3.627e+06 -3.626e+06 ... 1.381e+06 1.382e+06
  * band         (band) int64 1
    spatial_ref  int64 0
Data variables:
    Rad          (band, y, x) int16 ...
    DQF          (band, y, x) int8 ...
xds.rio.crs
CRS.from_wkt('PROJCS["unnamed",GEOGCS["unknown",DATUM["unnamed",SPHEROID["Spheroid",6378137,298.2572221]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]]],PROJECTION["Geostationary_Satellite"],PARAMETER["central_meridian",-75],PARAMETER["satellite_height",35786023],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],EXTENSION["PROJ4","+proj=geos +lon_0=-75 +h=35786023 +x_0=0 +y_0=0 +ellps=GRS80 +units=m +no_defs +sweep=x"]]')
Then, reproject:
xds_3857 = xds.rio.reproject("epsg:3857")
<xarray.Dataset>
Dimensions:      (band: 1, x: 7693, y: 4242)
Coordinates:
  * x            (x) float64 -1.691e+07 -1.691e+07 ... -5.892e+06 -5.891e+06
  * y            (y) float64 7.714e+06 7.712e+06 ... 1.641e+06 1.64e+06
  * band         (band) int64 1
    spatial_ref  int64 0
Data variables:
    Rad          (band, y, x) int16 1023 1023 1023 1023 ... 1023 1023 1023 1023
    DQF          (band, y, x) int8 0 0 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0
Attributes:
    creation_date:  2019-09-25 01:02:54.590053
xds_3857.rio.crs
CRS.from_epsg(3857)
Write to netcdf:
xds_3857.to_netcdf("epsg3857.nc")
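If all three channel files need to be reprojected before your console app runs, the same two calls can go in a loop (the glob pattern mirrors the one used in the GDAL script above, and the output naming is just a suggestion):

import glob
import rioxarray

for path in glob.glob("OR_ABI-L1b-RadC-M3C0[123]_G16_s*.nc"):  # blue C01, red C02, infrared C03
    xds = rioxarray.open_rasterio(path)
    xds.rio.reproject("epsg:3857").to_netcdf(path.replace(".nc", "_epsg3857.nc"))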
This is my first time in the forum; I hope I'm specific enough.
Using ImageChops inside PIL, I'm trying to multiply two images (both mode="L") but I always get the same error message. I've looked everywhere but couldn't find anything useful. I'd greatly appreciate any helpful ideas!
The relevant part of the code is attached.
def point(self, f, searchImage, technique):  # technique - inpaint or bicubic
    dimx, dimy = searchImage.size
    reader = csv.reader(f)
    for line in reader:  # f.readlines():
        coord = line
        print coord
        if searchImage.size[0] > float(coord[0]) + 95.5 and searchImage.size[1] > float(coord[1]) + 95.5:
            box = (float(coord[0]) - 93.5, float(coord[1]) - 93.5,
                   float(coord[0]) + 95.5, float(coord[1]) + 95.5)  # left upper right
        elif searchImage.size[0] < float(coord[0]) + 95.5 and searchImage.size[1] > float(coord[1]) + 95.5:
            box = (float(coord[0]) - 93.5, float(coord[1]) - 93.5,
                   searchImage.size[0] - 0.5, float(coord[1]) + 95.5)
            # size of box depends on pixel size. A pixel size of 14 micrometer
            # results in a cross size of 189 pixels
        else:
            box = (float(coord[0]) - 93.5, float(coord[1]) - 93.5,
                   float(coord[0]) + 95.5, searchImage.size[1] - 0.5)
        box = (math.floor(box[0]), math.floor(box[1]),
               math.floor(box[2]), math.floor(box[3]))
        searchCrop = searchImage.crop(box)
        c_x = int(float(coord[1]))
        c_y = int(float(coord[0]))
        abst_y = c_x - int(math.floor(box[1])) - 1  # x shift
        center = num.asarray(searchImage)[c_x, c_y]
        if center == 0:
            center = center + 0.00001  # to avoid division by zero
        val = [num.asarray(searchImage)[c_x - 1, c_y + 1],
               num.asarray(searchImage)[c_x - 1, c_y - 1],
               num.asarray(searchImage)[c_x + 1, c_y - 1],
               num.asarray(searchImage)[c_x + 1, c_y + 1]]
        # ERDAS: upper right, upper left, lower left, lower right
        val_dict = {0: 1, 1: -1, 2: -1, 3: 1}
        flag = val_dict[val.index(min(val))]
        if float(min(val)) / center > 2. or min(val) > 100:
            flag = 0
        newima = num.zeros((searchCrop.size[1], searchCrop.size[0]), dtype="float")
        Ayo = num.array(int(searchCrop.size[0]) * [255])
        Ay = num.array((abst_y + flag) * [255] + 3 * [0] +
                       ((int(searchCrop.size[0]) - 3 - abst_y) - flag) * [255])
        Ax = num.array(int(searchCrop.size[0]) * [0])
        Kx = num.array(3 * [Ayo] + ((int(searchCrop.size[1]) - 9) / 2 + flag) * [Ay] + 3 * [Ax] +
                       ((int(searchCrop.size[1]) - 9) / 2 - flag) * [Ay] + 3 * [Ayo])
        Kxlist = list(itertools.chain(*Kx))
        i = 0
        for y in range(int(searchCrop.size[1])):
            for x in range(int(searchCrop.size[0])):
                newima[y, x] = Kxlist[i + y + x]
            i = i + x
        kernel = Image.fromarray(newima)
        kernel = kernel.convert(mode="L")
        # -----
        modified = ImageChops.multiply(searchCrop, kernel)  # Results in an image
        # where the pixels along the cross axes will get a value of 0
        # ---
The error message is the following:
File "D:\GIS_dbase\Data\hma_cci\hexagon\KH9_Python\interpolate_cross.py", line 58, in
crossInterpolation filledImage = self.p_model.point(f, searchImage, method)
File "D:\GIS_dbase\Data\hma_cci\hexagon\KH9_Python\interpolate_cross.py", line 207, in
point modified = ImageChops.multiply(searchCrop,kernel) # Results in an image where
the pixels along the cross axes will get a value of 0
File "C:\Python27\lib\site-packages\PIL\ImageChops.py", line 119, in multiply
image1.load()
File "C:\Python27\lib\site-packages\PIL\Image.py", line 1730, in load
self.im = self.im.crop(self.__crop)
TypeError: integer argument expected, got float
The issue is that PIL's crop method takes a tuple of 4 integer values, but you are passing it floats (math.floor returns a float in Python 2). This should work:
box = tuple([int(math.floor(x)) for x in box])
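In context, the cast goes right after the box is assembled and before the crop:

        box = tuple([int(math.floor(x)) for x in box])  # integer pixel coordinates
        searchCrop = searchImage.crop(box)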