I have this code, which I largely modified from Matplotlib: How to plot images instead of points?
See my output graph: Output
I would like to rotate each hamster picture according to the "Rotation" column in the dataframe, so that the pictures are oriented correctly.
How do I do this? I am struggling to understand the "offsetbox" guide for Matplotlib.
These are the first rows of my dataframe:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from matplotlib.cbook import get_sample_data

df = pd.DataFrame([['21:21:00',0.1,0.0,10], ['21:21:01',0.1,0.0,20], ['21:21:02',0.1,0.0,28],
                   ['21:21:03',0.1,0.0,12], ['21:21:03',0.1,0.0,12], ['21:21:04',0.5,0.6,12],
                   ['21:21:05',3.7,4.4,10], ['21:21:06',6.8,8.1,10], ['21:21:07',9.9,11.9,20],
                   ['21:21:08',13.0,15.7,29], ['21:21:09',16.1,19.5,33]],
                  columns=['Time', 'Northings', 'Eastings', 'Rotation'])
def main():
    x = df['Eastings'][::2]
    y = df['Northings'][::2]
    image_path = get_sample_data(r'C:\Users\j.smith.EA.000\Desktop\PYTHON\hamster.jpg')
    fig, ax = plt.subplots()
    imscatter(x, y, image_path, zoom=0.03, ax=ax)
    ax = df.plot(x='Eastings', y='Northings', grid=True, figsize=(15, 8), legend=False,
                 xlim=(-30, 30), ylim=(-30, 30), kind='line', ax=ax)
    plt.show()

def imscatter(x, y, image, ax=None, zoom=1):
    image = plt.imread(image)
    im = OffsetImage(image, zoom=zoom)
    x, y = np.atleast_1d(x, y)
    artists = []
    for x0, y0 in zip(x, y):
        ab = AnnotationBbox(im, (x0, y0), frameon=False)
        artists.append(ax.add_artist(ab))
    return artists

main()
Going through your code: in your imscatter() function, the for loop assigns an image to each datapoint. You pass the image to
ab = AnnotationBbox(im, (x0, y0), frameon=False), where im is your image object.
Here, I would suggest rotating the image to the desired angle before passing it in.
For example:
im = rotate_image_by_angle(im, get_the_rotation_angle_from_column)
ab = AnnotationBbox(im, (x0, y0), frameon=False,)
artists.append(ax.add_artist(ab))
This approach is implemented in the following code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from matplotlib.cbook import get_sample_data
import cv2
import imutils
df = pd.DataFrame([['21:21:00',0.1,0.0,0], ['21:21:01',3.1,3.0,20], ['21:21:02',6.1,6.0,30],
                   ['21:21:03',9.1,9.0,40], ['21:21:03',12.1,12.0,50], ['21:21:04',15.1,15.2,60],
                   ['21:21:05',18.1,18.0,70], ['21:21:06',21.1,21.0,80], ['21:21:07',24.0,24.1,90],
                   ['21:21:08',27.0,27.1,100], ['21:21:09',30.0,30.1,110]],
                  columns=['Time', 'Northings', 'Eastings', 'Rotation'])
def main():
    x = df['Eastings'][::2]
    y = df['Northings'][::2]
    z = df['Rotation'][::2]
    fig, ax = plt.subplots()
    imscatter(x, y, z, zoom=0.03, ax=ax)
    ax = df.plot(x='Eastings', y='Northings', grid=True, figsize=(15, 7), legend=False,
                 xlim=(-5, 30), ylim=(-5, 30), kind='line', ax=ax)
    plt.show()

def imscatter(x, y, z, ax=None, zoom=1):
    # note: cv2.imread returns channels in BGR order; convert with
    # cv2.cvtColor(image, cv2.COLOR_BGR2RGB) if the colours look swapped
    image = cv2.imread('image.png')
    x, y, z = np.atleast_1d(x, y, z)
    artists = []
    for x0, y0, z0 in zip(x, y, z):
        # rotate the image by this point's angle before wrapping it in an OffsetImage
        rotated = rotate_bound(image, z0)
        im = OffsetImage(rotated, zoom=zoom)
        ab = AnnotationBbox(im, (x0, y0), frameon=False)
        artists.append(ax.add_artist(ab))
    return artists
def rotate_bound(image, angle):
    # grab the dimensions of the image and then determine the center
    (h, w) = image.shape[:2]
    (cX, cY) = (w // 2, h // 2)
    # grab the rotation matrix (applying the negative of the angle to
    # rotate clockwise), then grab the sine and cosine
    # (i.e., the rotation components of the matrix)
    M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
    cos = np.abs(M[0, 0])
    sin = np.abs(M[0, 1])
    # compute the new bounding dimensions of the image
    nW = int((h * sin) + (w * cos))
    nH = int((h * cos) + (w * sin))
    # adjust the rotation matrix to take into account translation
    M[0, 2] += (nW / 2) - cX
    M[1, 2] += (nH / 2) - cY
    # perform the actual rotation and return the image
    return cv2.warpAffine(image, M, (nW, nH), borderValue=(255, 255, 255))
main()
I have made minor changes throughout the code and added a function rotate_bound(image, angle), which rotates the image by a given angle. More details on how it was done can be found here.
The Output now looks like this...
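If you would rather avoid the OpenCV dependency, essentially the same imscatter() can be written with scipy.ndimage.rotate. This is only a sketch, assuming the picture is a JPEG like the hamster image so plt.imread returns a uint8 RGB array; depending on which direction your "Rotation" column measures, you may need to negate the angle:

from scipy import ndimage

def imscatter(x, y, z, ax=None, zoom=1):
    image = plt.imread('hamster.jpg')   # uint8 RGB, so no colour-channel conversion needed
    x, y, z = np.atleast_1d(x, y, z)
    artists = []
    for x0, y0, z0 in zip(x, y, z):
        # reshape=True grows the canvas so corners are not clipped,
        # cval=255 pads the exposed background with white
        rotated = ndimage.rotate(image, z0, reshape=True, cval=255)
        im = OffsetImage(rotated, zoom=zoom)
        ab = AnnotationBbox(im, (x0, y0), frameon=False)
        artists.append(ax.add_artist(ab))
    return artists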
This is my code:
import matplotlib.pyplot as plt
from matplotlib.patches import RegularPolygon
import numpy as np
offCoord = [[-2,-2],[-1,-2],[0,-2],[1,-2],[2,-2]]
fig, ax = plt.subplots(1)
ax.set_aspect('equal')
for c in offCoord:
    hex = RegularPolygon((c[0], c[1]), numVertices=6, radius=2./3., alpha=0.2, edgecolor='k')
    ax.add_patch(hex)
plt.autoscale(enable = True)
plt.show()
Expected result vs actual result in the attached image
Can you please tell me why my hexagons are not lined up edge to edge but instead overlap each other?
What am I doing wrong?
Use the law of cosines (for an isosceles triangle with a 120-degree angle and sides r, r, and 1):
1 = r*r + r*r - 2*r*r*cos(2pi/3) = r*r + r*r + r*r = 3*r*r
r = sqrt(1/3)
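A quick sanity check of that value: with r = sqrt(1/3), the width across the flats of each hexagon comes out to exactly 1, matching the unit spacing of your centres:

import numpy as np

r = np.sqrt(1 / 3)                        # ~0.5774
flat_to_flat = 2 * r * np.cos(np.pi / 6)  # width across the flats = r * sqrt(3)
print(r, flat_to_flat)                    # 0.5773..., 1.0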
This is the right code:
import matplotlib.pyplot as plt
from matplotlib.patches import RegularPolygon
import numpy as np
offCoord = [[-2,-2],[-1,-2],[0,-2],[1,-2],[2,-2]]
fig, ax = plt.subplots(1)
ax.set_aspect('equal')
for c in offCoord:
    # fix radius here
    hexagon = RegularPolygon((c[0], c[1]), numVertices=6, radius=np.sqrt(1/3), alpha=0.2, edgecolor='k')
    ax.add_patch(hexagon)
plt.autoscale(enable = True)
plt.show()
Very simply, your geometry is wrong. You specified a radius of 2/3. Check your documentation for RegularPolygon; I think that you'll find the correct radius is 0.577 (sqrt(3) / 3) or something close to that.
The radius of a regular hexagon equals its side length. In that case, the proper offset between neighbouring centres should be
offset = radius*3**0.5. If the radius is 2/3, the x offsets should be 1.1547*k, where k = -2, -1, ...
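A sketch of that suggestion, keeping the 2/3 radius and spreading the centres instead:

import matplotlib.pyplot as plt
from matplotlib.patches import RegularPolygon

radius = 2. / 3.
spacing = radius * 3 ** 0.5                     # ~1.1547, width across the flats
offCoord = [[k * spacing, -2] for k in range(-2, 3)]

fig, ax = plt.subplots(1)
ax.set_aspect('equal')
for c in offCoord:
    ax.add_patch(RegularPolygon((c[0], c[1]), numVertices=6, radius=radius,
                                alpha=0.2, edgecolor='k'))
plt.autoscale(enable=True)
plt.show()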
I was able to compute an FFT of my image, but I don't know how to continue from there...
I am using this to solve my problem: Align text for OCR
This is all the code I have working so far:
import cv2
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
img = cv2.imread(r'test.jpg', cv2.IMREAD_GRAYSCALE)
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
magnitude_spectrum = 20 * np.log(np.abs(fshift))
plt.subplot(121), plt.imshow(img, cmap='gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(magnitude_spectrum, cmap='gray')
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
plt.show()
I need the mean value generated from a polar transformation, but I don't know how to convert an FFT to polar coordinates in Python.
This is a rough solution to your problem; it was tested on one sample image, and the result looks credible.
# your code goes here...
def transform_data(m):
    dpix, dpiy = m.shape
    # centre of the polar grid: the brightest point of the spectrum
    # (with fftshift this sits near the middle of a roughly square image)
    x_c, y_c = np.unravel_index(np.argmax(m), m.shape)
    angles = np.linspace(0, np.pi * 2, min(dpix, dpiy))
    # largest radius that stays inside the image
    mrc = min(abs(x_c - dpix), abs(y_c - dpiy), x_c, y_c)
    radiuses = np.linspace(0, mrc, max(dpix, dpiy))
    A, R = np.meshgrid(angles, radiuses)
    # sample the magnitude spectrum on the polar grid
    X = R * np.cos(A)
    Y = R * np.sin(A)
    return A, R, m[X.astype(int) + mrc - 1, Y.astype(int) + mrc - 1]
angles, radiuses, m = transform_data(magnitude_spectrum)
plt.contourf(angles, radiuses, m)
Finally, we can get the angle we want to turn the original image:
sample_angles = np.linspace(0, 2 * np.pi, len(m.sum(axis=0))) / np.pi * 180
turn_angle_in_degrees = 90 - sample_angles[np.argmax(m.sum(axis=0))]
For my sample image I got:
turn_angle_in_degrees = 3.2015810276679844 degrees.
Also, we can plot the projected spectrum magnitude:
plt.plot(sample_angles, m.sum(axis=0))
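To actually deskew the original image with the angle found above, scipy.ndimage.rotate can be applied to it. A sketch, reusing img and turn_angle_in_degrees from above; the sign of the angle may need flipping depending on the direction of the skew:

from scipy import ndimage

# rotate the page by the detected angle; reshape=True keeps the corners,
# cval=255 fills the exposed background with white
deskewed = ndimage.rotate(img, turn_angle_in_degrees, reshape=True, cval=255)

plt.imshow(deskewed, cmap='gray')
plt.show()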
Hope that helps...
I'm trying to take two rectangular images, one of visible surface features and one representing elevation, and map them onto a 3D sphere. I know how to map features onto a sphere with Cartopy, and I know how to make relief surface maps, but I can't find a simple way to combine them to get exaggerated elevation on a spherical projection. As an example, here is the same thing done in MATLAB:
Does anybody know if there's a simple way to do this in Python?
My solution does not meet all of your requirements, but it could be a good starting point.
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from matplotlib.cbook import get_sample_data
from matplotlib._png import read_png  # private module; in newer matplotlib, plt.imread(fn) returns the same array

# Use world image with shape (360 rows, 720 columns)
pngfile = 'temperature_15-115.png'
fn = get_sample_data(pngfile, asfileobj=False)
img = read_png(fn)  # array of RGBA colours used to paint the surface
# Some needed functions / constant
r = 5
pi = np.pi
cos = np.cos
sin = np.sin
sqrt = np.sqrt
# Prep values to match the image shape (360 rows, 720 columns)
phi, theta = np.mgrid[0:pi:360j, 0:2*pi:720j]
# Parametric eq for a distorted globe (for demo purposes)
x = r * sin(phi) * cos(theta)
y = r * sin(phi) * sin(theta)
z = r * cos(phi) + 0.5* sin(sqrt(x**2 + y**2)) * cos(2*theta)
fig = plt.figure()
fig.set_size_inches(9, 9)
ax = fig.add_subplot(111, projection='3d', label='axes1')
# Drape the image (img) on the globe's surface
sp = ax.plot_surface(x, y, z,
                     rstride=2, cstride=2,
                     facecolors=img)
ax.set_aspect(1)
plt.show()
The resulting image:
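To get actual exaggerated elevation rather than the demo distortion above, the radius itself can be displaced by an elevation grid before computing x, y and z. A sketch, reusing r, phi, theta, img and ax from the code above; elev here is a synthetic placeholder with the same 360 x 720 shape, which you would replace with your real height map scaled to the globe's units:

# elev: elevation per grid point, normalised to [0, 1] (placeholder data)
elev = 0.5 * (1 + sin(3 * theta) * sin(2 * phi))

exaggeration = 0.5                  # how strongly to exaggerate the relief
r_eff = r + exaggeration * elev     # per-vertex radius

x = r_eff * sin(phi) * cos(theta)
y = r_eff * sin(phi) * sin(theta)
z = r_eff * cos(phi)

sp = ax.plot_surface(x, y, z, rstride=2, cstride=2, facecolors=img)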
I am trying my hand at image processing and my goal is to output the measurements of a human hand given an image of a human hand as the input. My current thought process is to include a quarter in the image to provide a reference value. Therefore, my input looks like this:
I am currently using scikit-image for image processing, and my code looks like this:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from skimage import data
from skimage.filter import threshold_otsu
from skimage.segmentation import clear_border
from skimage.morphology import label, closing, square
from skimage.measure import regionprops
from skimage.color import label2rgb
from skimage import io, color
#image = data.coins()[50:-50, 50:-50]
filename = io.imread("hand2.JPG")
image = color.rgb2gray(filename)
# apply threshold
thresh = threshold_otsu(image)
bw = closing(image > thresh, square(3))
# remove artifacts connected to image border
cleared = bw.copy()
#clear_border(cleared)
# label image regions
label_image = label(cleared)
borders = np.logical_xor(bw, cleared)
label_image[borders] = -1
image_label_overlay = label2rgb(label_image, image=image)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(12, 12))
ax.imshow(image_label_overlay)
for region in regionprops(label_image):
    # skip small images
    if region.area < 1000:
        continue

    print "Perimeter: "
    print region.perimeter
    print "Area: "
    print region.area
    print ""

    # draw rectangle around segments
    minr, minc, maxr, maxc = region.bbox
    rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                              fill=False, edgecolor='red', linewidth=2)
    ax.add_patch(rect)
plt.show()
I am able to segment my image into regions, but I don't know how to convert my hand segment into measurements for the individual fingers and width of the hand. I think I'm close, I just don't quite know how to proceed!
EDIT: Maybe I should be using opencv for this?
It wasn't clear exactly what you wanted as output, but here is my best guess. I used the SLIC segmentation algorithm to identify regions in the image. Based on their region properties (area), I choose the largest two (hand and coin) and display them, along with their principal axes.
import numpy as np
import matplotlib.pyplot as plt
import math
from skimage import io, segmentation, measure, color
image = io.imread("hand2.JPG")
label_image = segmentation.slic(image, n_segments=2)
label_image = measure.label(label_image)
regions = measure.regionprops(label_image)
areas = [r.area for r in regions]
ix = np.argsort(areas)
hand = regions[ix[-1]]
coin = regions[ix[-2]]
selected_labels = np.zeros_like(image[..., 0], dtype=np.uint8)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(12, 12))
for n, region in enumerate([hand, coin]):
    selected_labels[region.coords[:, 0], region.coords[:, 1]] = n + 2

    y0, x0 = region.centroid
    orientation = region.orientation

    # end points of the major and minor axes, measured from the centroid
    x1 = x0 + math.cos(orientation) * 0.5 * region.major_axis_length
    y1 = y0 - math.sin(orientation) * 0.5 * region.major_axis_length
    x2 = x0 - math.sin(orientation) * 0.5 * region.minor_axis_length
    y2 = y0 - math.cos(orientation) * 0.5 * region.minor_axis_length

    ax.plot((x0, x1), (y0, y1), '-r', linewidth=2.5)
    ax.plot((x0, x2), (y0, y2), '-r', linewidth=2.5)
    ax.plot(x0, y0, '.g', markersize=15)
image_label_overlay = color.label2rgb(selected_labels, image=image, bg_label=0)
ax.imshow(image_label_overlay, cmap='gray')
ax.axis('image')
plt.show()
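To turn these segments into real-world measurements, the coin can serve as the scale reference the question mentions. A sketch, reusing the hand and coin region objects from above and assuming the coin is a US quarter (24.26 mm diameter); which properties you convert depends on the hand dimensions you actually need:

# the coin's major axis length approximates its diameter in pixels
QUARTER_DIAMETER_MM = 24.26
mm_per_pixel = QUARTER_DIAMETER_MM / coin.major_axis_length

# convert a couple of whole-hand measurements from pixels to millimetres
hand_length_mm = hand.major_axis_length * mm_per_pixel
hand_width_mm = hand.minor_axis_length * mm_per_pixel

print("Hand length: %.1f mm" % hand_length_mm)
print("Hand width:  %.1f mm" % hand_width_mm)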