I am trying to extract the coordinates of all features from the MediaPipe FaceMesh solution, but for every image that I test it gives the same coordinates. I don't understand what's wrong here. If anyone could help, it would be great!
import mediapipe as mp
import cv2
import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
import json
import os
from os import path
file_name = "./json_output"
img_base = cv2.imread("./johnny-depp-sunglasses-hat-smile-wallpaper.jpg")
img = img_base.copy()
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(static_image_mode=True)
results = face_mesh.process(img)
landmarks = results.multi_face_landmarks[0]
xs, ys, zs = [], [], []
CONTOUR_LIST = ["FACEMESH_LIPS", "FACEMESH_FACE_OVAL", "FACEMESH_LEFT_IRIS",
"FACEMESH_LEFT_EYEBROW","FACEMESH_LEFT_EYE", "FACEMESH_RIGHT_IRIS",
"FACEMESH_RIGHT_EYEBROW", "FACEMESH_RIGHT_EYE"]
This is the main function:
def extract_landmarks(inp):
    img = img_base.copy()
    for landmark in landmarks.landmark:
        x = landmark.x
        y = landmark.y
        z = landmark.z
        xs.append(x)
        ys.append(y)
        zs.append(z)
        relative_x = int(x * img_base.shape[1])
        relative_y = int(y * img_base.shape[0])
        cv2.circle(img, (relative_x, relative_y),
                   radius=5, color=(0, 0, 255),
                   thickness=-1)
    # fig = plt.figure(figsize=(15, 15))
    # plt.imshow(img[:, :, ::-1])
    # plt.show()
    img = img_base.copy()
    for i in inp:
        for src_id, tar_id in i:
            source = landmarks.landmark[src_id]
            target = landmarks.landmark[tar_id]
            relative_source = int(source.x * img.shape[1]), int(source.y * img.shape[0])
            relative_target = int(target.x * img.shape[1]), int(target.y * img.shape[0])
            cv2.line(img, relative_source, relative_target,
                     color=(255, 255, 255), thickness=2)
    fig = plt.figure(figsize=(15, 15))
    plt.imshow(img[:, :, ::-1])
    plt.show()
    result = inp
    my_json = list(result)
    my_ans = [{f"{CONTOUR_LIST[k]}": {'x': x, 'y': y}}
              for k in range(0, 8) for i in my_json for x, y in i]
    if os.path.exists(file_name):
        print("Already exists!")
    else:
        with open(file_name, 'w') as file:
            json.dump(my_ans, file,
                      indent=4,
                      separators=(',', ': '))
    return len(my_json)
And this is the code for calling the function:
features = []
features.append(mp_face_mesh.FACEMESH_LIPS)
features.append(mp_face_mesh.FACEMESH_FACE_OVAL)
features.append(mp_face_mesh.FACEMESH_LEFT_IRIS)
features.append(mp_face_mesh.FACEMESH_LEFT_EYEBROW)
features.append(mp_face_mesh.FACEMESH_LEFT_EYE)
features.append(mp_face_mesh.FACEMESH_RIGHT_IRIS)
features.append(mp_face_mesh.FACEMESH_RIGHT_EYEBROW)
features.append(mp_face_mesh.FACEMESH_RIGHT_EYE)
extract_landmarks(features)
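For reference, each of these FACEMESH_* constants is a frozenset of (source, target) landmark-index pairs, which is what the src_id, tar_id loop in extract_landmarks unpacks. A quick check (assuming a recent mediapipe version):

import mediapipe as mp

# Each FACEMESH_* connection set is a frozenset of (source, target)
# landmark-index pairs, hence the src_id, tar_id unpacking above.
lips = mp.solutions.face_mesh.FACEMESH_LIPS
print(type(lips), len(lips))   # <class 'frozenset'> and the number of edges
print(sorted(lips)[0])         # one (source, target) index pair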
For every image, I am getting the same coordinates. The coordinates in json_output are fixed no matter which image I use; look at the face mesh. The content of json_output is the numbers shown in the image. relative_x and relative_y are the coordinates relative to the width and height of the picture.
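To make that last point concrete, a minimal sketch of the normalized-to-pixel conversion (the image size and landmark values here are made up for illustration; MediaPipe stores landmark x and y as fractions of the image width and height):

# Hypothetical values for illustration only.
img_h, img_w = 480, 640            # assumed image size
norm_x, norm_y = 0.5, 0.25         # a made-up normalized landmark
relative_x = int(norm_x * img_w)   # -> 320
relative_y = int(norm_y * img_h)   # -> 120
print(relative_x, relative_y)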
Related
I am still new to Python, and I want to print several properties of multiple images to a CSV file. I have tried How to f.write .append results to CSV but I still can't figure out where I went wrong, so I very much appreciate your help.
Here is my code:
import csv
import cv2
import glob
import numpy as np
filename1s = []
widths = []
heights = []
areas = []
rect_areas = []
equi_diameters = []
aspect_ratios = []
extents = []
solidities = []
path = 'images/*.png'
with open('file.csv', 'w') as f:
    csv_out = csv.writer(f)
    for filename1 in glob.glob(path):
        imge = cv2.imread(filename1)
        filename1s.append(imge)
        img_maskedgray = cv2.cvtColor(imge, cv2.COLOR_BGR2GRAY)
        contours2 = cv2.findContours(img_maskedgray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours2 = contours2[0] if len(contours2) == 2 else contours2[1]
        big_contour2 = max(contours2, key=cv2.contourArea, default=None)
        area = cv2.contourArea(big_contour2)
        x, y, width, height = cv2.boundingRect(big_contour2)
        aspect_ratio = float(width) / height       # ratio of width to height of the object's bounding rect
        rect_area = width * height                 # area of the bounding rectangle
        extent = float(area) / rect_area           # ratio of contour area to bounding rectangle area
        hull = cv2.convexHull(big_contour2)
        hull_area = cv2.contourArea(hull)
        solidity = float(area) / hull_area
        equi_diameter = np.sqrt(4 * area / np.pi)  # diameter of the circle whose area equals the contour area
        widths.append(width)
        heights.append(height)
        areas.append(area)
        rect_areas.append(rect_area)
        equi_diameters.append(equi_diameter)
        aspect_ratios.append(aspect_ratio)
        extents.append(extent)
        solidities.append(solidity)
        csv_out.writerow([filename1, width, height, area, rect_area, equi_diameter, aspect_ratio, extent, solidity])
Thanks in advance
As stated at https://docs.python.org/3/library/csv.html, if csv.writer is used with a file object, the file needs to be opened with newline='':
with open('file.csv','w', newline='') as f:
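For context, a minimal self-contained sketch of the corrected pattern (the filename and row values here are made up): without newline='', the csv module on Windows emits an extra blank line after every row, which is the behavior the docs warn about.

import csv

# Open with newline='' so the csv module controls line endings itself;
# otherwise each row may be followed by a blank line on Windows.
with open('demo.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['filename', 'width', 'height'])
    writer.writerow(['pic0.png', 128, 128])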
Hi all, I have 70k images saved in an .h5 file, and with this script I want to read from that file and annotate text instances into a .json file. When I run the script it takes a very long time to annotate one image (about 2 hours).
When I do this with 15 images, the script works fine and annotates all 15 images in a few seconds.
With 70k images the .h5 file is 51 GB.
I don't know whether the problem is in the code or whether the h5 file is too big. The code works fine with a small number of images, but I'm working on a project where I need 70k or even 700k images.
from __future__ import division
import os
import os.path as osp
from re import U
import numpy as np
import matplotlib.pyplot as plt
import h5py
from common import *
import json
import cv2
from itertools import cycle
import js2py
# from gen import brojac
# from synthgen import imnames

global x
global y

def write_json(data, filename='annotation.json'):
    with open(filename, 'w') as file:
        json.dump(data, file, indent=4)

DATA_PATH = 'results'
DB_FNAME = osp.join(DATA_PATH, 'SynthText.h5')

def get_data():
    return h5py.File(DB_FNAME, 'r')

def viz_textbb(text_im, imageName, charBB_list, wordBB, textToList, alpha=1.0):
    """
    text_im : image containing text
    charBB_list : list of 2x4xn_i bounding-box matrices
    wordBB : 2x4xm matrix of word coordinates
    """
    plt.close(1)
    plt.figure(1)
    plt.imshow(text_im)
    H, W = text_im.shape[:2]
    global imnames
    # added from main()
    # **********************************************
    db = h5py.File('results/SynthText.h5', 'r')
    dsets = sorted(db['data'].keys())
    for k in dsets:
        db = get_data()
        imnames = sorted(db['data'].keys())
    start = 0
    count = 0
    coordinate = []
    coordinate1 = []
    name = []
    name1 = []
    final = []
    upperList = []
    downList = []
    counter = 0
    FinalFinal = []
    imageData = {}
    dictList = []
    for eachWord in textToList:
        length = len(eachWord)
        for i in range(0, 4):
            for j in range(start, length + start):
                coordinate.append([charBB_list[0][0][i][j], charBB_list[0][1][i][j]])
                coordinate1.append((charBB_list[0][0][i][j], charBB_list[0][1][i][j]))
            name.append(coordinate)
            name1.append(coordinate1)
            coordinate = []
        for j in range(0, length):
            for i in range(len(name)):
                final.append(name[i][j])
                # bookkeeping for drawing only
                if i == 0 or i == 1:
                    upperList.append(name[i][j])
                if i == 2:
                    downList.append(name[i + 1][j])
                if i == 3:
                    downList.append(name[i - 1][j])
        down = reversed(downList)
        joinList = [*upperList, *down, upperList[0]]
        FinalFinal.append(joinList)
        imageData['transcription'] = eachWord
        imageData['language'] = "Latin"
        imageData['illegibility'] = False
        imageData['points'] = final
        dictionary_copy = imageData.copy()
        dictList.append(dictionary_copy)
        del dictionary_copy
        finalToList = np.array(final)
        name = []
        final = []
        upperList = []
        downList = []
        start = len(eachWord) + start
    finalDict = {f'gt_{imageName}': dictList}
    with open("annotation.json") as json_file:
        data = json.load(json_file)
        temp = data["annotations"]
        temp.update(finalDict)
        write_json(data)
    for list in FinalFinal:
        x, y = zip(*list)
        plt.plot(x, y)
        # points = tuple(zip(x, y))
        # # boundaries of the bounding box
        # left, right = min(points, key=lambda p: p[0]), max(points, key=lambda p: p[0])
        # bottom, top = min(points, key=lambda p: p[1]), max(points, key=lambda p: p[1])
        # # area
        # base = right[0] - left[0]
        # height = top[1] - bottom[1]
        # A = base * height
    for i in range(len(charBB_list)):
        # charBB_list holds, per corner, the x values of every character
        # instance, and then the y values separately
        bbs = charBB_list[i]
        ni = bbs.shape[-1]
        for j in range(ni):
            bb = bbs[:, :, j]
            bb = np.c_[bb, bb[:, 0]]  # appending bb[:, 0] closes the polygon back to the top-left
            # plt.plot(bb[0, :], bb[1, :], 'r', alpha=alpha)
    # plot the word-BB:
    for i in range(wordBB.shape[-1]):
        bb = wordBB[:, :, i]       # wordBB coordinates
        bb = np.c_[bb, bb[:, 0]]   # joins back to the leftmost corner, needed
        # plt.plot(bb[0, :], bb[1, :], 'g', alpha=alpha)
        # visualize the individual vertices:
        vcol = ['r', 'g', 'b', 'k']
        # for j in range(4):
        #     plt.scatter(bb[0, j], bb[1, j], color=vcol[j])
    plt.gca().set_xlim([0, W - 1])
    plt.gca().set_ylim([H - 1, 0])
    plt.show(block=False)
def main(db_fname):
    db = h5py.File(db_fname, 'r')
    dsets = sorted(db['data'].keys())
    print("total number of images : ", colorize(Color.RED, len(dsets), highlight=True))
    for k in dsets:
        rgb = db['data'][k][...]
        charBB = db['data'][k].attrs['charBB']
        wordBB = db['data'][k].attrs['wordBB']
        txt = db['data'][k].attrs['txt']
        textToList = (db['data'][k].attrs['txt']).tolist()
        viz_textbb(rgb, k, [charBB], wordBB, textToList)
        print("image name : ", colorize(Color.RED, k, bold=True))
        print(" ** no. of chars : ", colorize(Color.YELLOW, charBB.shape[-1]))
        print(" ** no. of words : ", colorize(Color.YELLOW, wordBB.shape[-1]))
        print(" ** text : ", colorize(Color.GREEN, txt))
        # uncomment below to step through images with Enter
        # if 'q' in input("next? ('q' to exit) : "):
        #     break
    db.close()

if __name__ == '__main__':
    main('results/SynthText.h5')
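For reference, a minimal sketch of the access pattern h5py is designed around, opening the file once and reusing the handle (paths and keys assumed from the script above). Note that viz_textbb above re-opens the 51 GB file and re-sorts all 70k keys for every single image, which is worth ruling out when profiling:

import h5py

# Open the .h5 file once and reuse the handle; h5py reads lazily,
# so iterating keys is cheap, but re-opening the file per image is not.
with h5py.File('results/SynthText.h5', 'r') as db:
    for k in sorted(db['data'].keys()):
        rgb = db['data'][k][...]                 # pixel data for one image
        wordBB = db['data'][k].attrs['wordBB']   # word bounding boxes
        # ... process one image here ...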
I have multiple images stored on my desktop that need to be processed one at a time by TensorFlow. My problem is that I don't know how to create a loop to accomplish the individual reading and processing of images.
I found on this site code that enables the reading of multiple images stored locally. I placed the code where I thought it would work, but it didn't.
With the code below, out of thirty images only the first two were shown. Sorry about the formatting; not an expert. I think the loop shouldn't be placed there as a whole, and the indentation has something to do with the bad results. Any tips will be highly appreciated.
Thanks
...code
from PIL import Image
import os, sys

path = 'C:\\Users\\Owner\\Desktop\\Images\\'
dirs = os.listdir(path)
....Code
if __name__ == '__main__':
    ...code
    for item in dirs:
        if os.path.isfile(path + item):
            im = Image.open(path + item)
            f, e = os.path.splitext(path + item)
            loadedImage = path + item
            parser.add_argument('--image', type=str, default='loadedImage')
    ....code
    for i, single_3d in enumerate(pose_3d):
        plot_pose(single_3d)
    pass
I switched the above code to the following and it worked. However, my images are not displayed sequentially. Can anyone tell me how to fix this?
This is the code:
import argparse
import logging
import time
import os
import ast
import common
import cv2
import numpy as np
from estimator import TfPoseEstimator
from networks import get_graph_path, model_wh
import sys
from PIL import Image

path = 'C:\\Users\\Owner\\Desktop\\data\\'
dirs = os.listdir(path)
dirs.sort()

from lifting.prob_model import Prob3dPose
from lifting.draw import plot_pose

logger = logging.getLogger('TfPoseEstimator')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)

if __name__ == '__main__':
    os.chdir('..')
    for item in dirs:
        im = Image.open(path + item)
        f, e = os.path.splitext(path + item)
        parser = argparse.ArgumentParser(description='tf-pose-estimation run')
        nameimage = f + e
        print(nameimage)
        parser.add_argument('--image', type=str, default=nameimage)
        parser.add_argument('--model', type=str, default='mobilenet_thin_432x368',
                            help='cmu_640x480 / cmu_640x360 / mobilenet_thin_432x368')
        parser.add_argument('--scales', type=str, default='[1.0, (1.1, 0.05)]',
                            help='for multiple scales, eg. [1.0, (1.1, 0.05)]')
        args = parser.parse_args()
        scales = ast.literal_eval(args.scales)
        w, h = model_wh(args.model)
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
        image = common.read_imgfile(args.image, None, None)
        t = time.time()
        humans = e.inference(image, scales=[None])
        elapsed = time.time() - t
        logger.info('inference image: %s in %.4f seconds.' % (args.image, elapsed))
        image = cv2.imread(args.image, cv2.IMREAD_COLOR)
        image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
        cv2.imshow('tf-pose-estimation result', image)
        cv2.waitKey()
        logger.info('3d lifting initialization.')
        poseLifting = Prob3dPose('./src/lifting/models/prob_model_params.mat')
        image_h, image_w = image.shape[:2]
        standard_w = 640
        standard_h = 480
        pose_2d_mpiis = []
        visibilities = []
        for human in humans:
            pose_2d_mpii, visibility = common.MPIIPart.from_coco(human)
            pose_2d_mpiis.append([(int(x * standard_w + 0.5), int(y * standard_h + 0.5))
                                  for x, y in pose_2d_mpii])
            visibilities.append(visibility)
        pose_2d_mpiis = np.array(pose_2d_mpiis)
        visibilities = np.array(visibilities)
        transformed_pose2d, weights = poseLifting.transform_joints(pose_2d_mpiis, visibilities)
        pose_3d = poseLifting.compute_3d(transformed_pose2d, weights)
        pose_3dqt = np.array(pose_3d[0]).transpose()
        for point in pose_3dqt:
            print(point)  # my points
        import matplotlib.pyplot as plt
        fig = plt.figure()
        a = fig.add_subplot(2, 2, 1)
        a.set_title('Result')
        plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        a = fig.add_subplot(2, 2, 2)
        tmp = np.amax(e.heatMat, axis=2)
        plt.imshow(tmp, cmap=plt.cm.gray, alpha=0.5)
        plt.colorbar()
        tmp2 = e.pafMat.transpose((2, 0, 1))
        tmp2_odd = np.amax(np.absolute(tmp2[::2, :, :]), axis=0)
        tmp2_even = np.amax(np.absolute(tmp2[1::2, :, :]), axis=0)
        a = fig.add_subplot(2, 2, 3)
        a.set_title('Vectormap-x')
        plt.imshow(tmp2_odd, cmap=plt.cm.gray, alpha=0.5)
        plt.colorbar()
        a = fig.add_subplot(2, 2, 4)
        a.set_title('Vectormap-y')
        plt.imshow(tmp2_even, cmap=plt.cm.gray, alpha=0.5)
        plt.colorbar()
        for i, single_3d in enumerate(pose_3d):
            plot_pose(single_3d)
        plt.show()
Image 1 jumps to image 10.
The answer from this question may provide an example of how to read the images in a folder.
My jpg pictures are stored sequentially as pic0, pic1, pic2, pic3, pic4, pic20, pic30, pic100, but my code displays them as pic0, pic1, pic100, pic2, pic20, pic3, pic30, ... How do I avoid that?
The issue is with the way the images in the dataset are named, combined with the sorting step you added after reading the directory listing. My suggestion is to rename the image files with leading zeros (for example pic000, pic001, ..., pic010, pic011, ...).
To rename the files (given your image names), a minimal example:
import os

s1 = os.listdir('.')
for s in s1:
    if ".jpg" not in s:
        continue
    if len(s) == 8:    # handle pic1.jpg ... pic9.jpg
        # print(s[:-5] + '00' + s[3] + '.jpg')
        os.rename(s, s[:-5] + '00' + s[3] + '.jpg')
    elif len(s) == 9:  # handle pic10.jpg ... pic99.jpg
        os.rename(s, s[:-6] + '0' + s[3:5] + '.jpg')
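Alternatively (not part of the original answer), the files can be sorted numerically without renaming anything, by using the number embedded in each filename as the sort key. A minimal sketch, assuming names of the form picN.jpg:

import os
import re

def numeric_key(filename):
    # Extract the integer part of names like 'pic10.jpg' so that
    # pic2 sorts before pic10 (a plain string sort puts pic10 first).
    match = re.search(r'\d+', filename)
    return int(match.group()) if match else -1

files = [f for f in os.listdir('.') if f.endswith('.jpg')]
for name in sorted(files, key=numeric_key):
    print(name)  # pic0.jpg, pic1.jpg, pic2.jpg, ..., pic100.jpg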
I used cv2 and plt to generate images using nested loops and functions. Memory usage keeps increasing while the program runs, until the computer crashes.
I tried del and gc.collect() at the end of the loops and functions, but it didn't work. I want to know where the problem is.
import os
import biosppy
import cv2
import numpy as np
import pandas as pd
import scipy.io as scio
import matplotlib.pyplot as plt
# normalize() is a helper defined elsewhere in the project

def seg(mat_data):
    data = np.array(mat_data)
    signals = []
    peaks = biosppy.signals.ecg.christov_segmenter(signal=data, sampling_rate=500)[0]
    for i in range(2, len(peaks) - 1):
        left_diff = abs(peaks[i - 1] - peaks[i]) // 2
        right_diff = abs(peaks[i + 1] - peaks[i]) // 2
        x = peaks[i] - left_diff
        y = peaks[i] + right_diff
        signal = data[x:y]
        signals.append(signal)
    return signals, peaks

def sig2img(signals, file_name, label, channel):
    if label == 'null':
        for i, signal in enumerate(signals):
            fig = plt.figure(frameon=False)
            plt.plot(signal, linewidth=3.5)
            plt.xticks([]), plt.yticks([])
            for spine in plt.gca().spines.values():
                spine.set_visible(False)
            filename = 'test_img' + '/' + file_name + '_' + str(label) + '_' + str(channel) + '_' + str(i) + '.png'
            fig.savefig(filename)
            plt.close(fig)
            im_gray = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
            im_gray = cv2.resize(im_gray, (128, 128), interpolation=cv2.INTER_LANCZOS4)
            cv2.imwrite(filename, im_gray)
        return
    for i, signal in enumerate(signals):
        fig = plt.figure(frameon=False)
        plt.plot(signal, linewidth=3.5)
        plt.xticks([]), plt.yticks([])
        for spine in plt.gca().spines.values():
            spine.set_visible(False)
        filename = 'train_img' + '/' + file_name + '_' + str(label) + '_' + str(channel) + '_' + str(i) + '.png'
        fig.savefig(filename)
        plt.close(fig)
        im_gray = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
        im_gray = cv2.resize(im_gray, (128, 128), interpolation=cv2.INTER_LANCZOS4)
        cv2.imwrite(filename, im_gray)

def generate_images(train=True, cut=5000):
    if train == False:
        data_path = 'data/test'
        test_name = os.listdir(data_path)
        file_raw = [(scio.loadmat(data_path + '/' + m),) for m in test_name]
        data = [normalize(d[0]['data']) for d in file_raw]
        for i in range(len(test_name)):
            print(test_name[i])
            print(data[i].shape)
            label = 'null'
            for j in range(data[i].shape[0]):
                print(data[i][j, :cut].shape)
                signals, peaks = seg(data[i][j, :cut])
                channel = j + 1
                sig2img(signals, test_name[i], label, channel)
    data_path = 'data/train'
    reference_path = 'data/reference.txt'
    mat_label = pd.read_csv(reference_path, sep='\t', header=None)
    mat = mat_label[0]
    file_raw = [(scio.loadmat(data_path + '/' + m),) for m in mat]
    train_name = [m for m in mat]
    label = mat_label[1]
    data = [normalize(d[0]['data']) for d in file_raw]  # data shape: 12x5000
    for i in range(len(label)):
        print(train_name[i])
        print(data[i].shape)
        print(label[i])
        for j in range(data[i].shape[0]):
            print(data[i][j, :cut].shape)
            signals, peaks = seg(data[i][j, :cut])  # peaks is a list with length 6-10
            channel = j + 1                         # each signal is a numpy array of shape (500,)
            sig2img(signals, train_name[i], label[i], channel)
I want to know where I should add the del or gc calls, or some other way to free the memory while the loop is running.
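Not from the original thread, but for context: one common pattern for generating many figures without pyplot's global figure manager holding on to state is to build Figure objects directly on an Agg canvas. A minimal sketch (the signal and output filenames are made up):

import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure

# Build each figure directly on an Agg canvas instead of via plt.figure(),
# so no reference is kept in pyplot's global state between iterations.
for i in range(3):
    fig = Figure(frameon=False)
    FigureCanvasAgg(fig)                  # attach a canvas for rendering
    ax = fig.add_subplot(111)
    ax.plot(np.sin(np.linspace(0, 6.28, 500)), linewidth=3.5)
    ax.set_xticks([]), ax.set_yticks([])
    fig.savefig(f'sketch_{i}.png')        # hypothetical output name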
I would like the resulting graph to be in a new window. I know the line %matplotlib inline puts the graph in the console, but if I remove it I get the error
'FigureCanvasMac' object has no attribute 'get_renderer'.
Is there a way I can get around this?
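For background (this is general matplotlib/IPython behavior, not something from the original post): the %matplotlib magic selects the rendering backend, and a windowed backend can be requested instead of inline. A sketch, assuming a Tk-capable environment:

# In an IPython/Jupyter session, choose a windowed backend instead of inline:
#   %matplotlib tk        (or: %matplotlib qt)
# In a plain script, select the backend before importing pyplot:
import matplotlib
matplotlib.use('TkAgg')   # assumes Tk is available on this machine
import matplotlib.pyplot as plt

plt.plot([1, 2, 3])
plt.show()                # opens in its own window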
import re
import ftplib
import os
from urllib.request import urlopen
import json
import matplotlib
%matplotlib inline
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, PathPatch, Rectangle
from matplotlib.path import Path
from matplotlib.transforms import Affine2D
import numpy as np
from numpy.random import *
import pylab
import sunpy.time
from adjustText import adjust_text
import pandas as pd
from scipy import interpolate
import sys
info = []
parsedFilename = []
dateAndTime = []
xcen = []
ycen = []
sciObj = []
xfov = []
yfov = []
matchingAR = []
def getNumberOfEntries(theJSON):
    return len(dateAndTime)

def getInfo(counter, theJSON):
    cont = True
    while cont:
        try:
            dateAndTime.append(theJSON["Events"][counter]["date"])
            xcen.append(float("%.2f" % theJSON["Events"][counter]["xCen"]))
            ycen.append(float("%.2f" % theJSON["Events"][counter]["yCen"]))
            xfov.append(float("%.2f" % theJSON["Events"][counter]["raster_fovx"]))
            yfov.append(float("%.2f" % theJSON["Events"][counter]["raster_fovy"]))
            sciObj.append(theJSON["Events"][counter]["sciObjectives"])
            counter = counter + 1
            getInfo(counter, theJSON)
        except IndexError:
            cont = False
            break

def setXMax(theJSON):
    xmax = xcen[0]
    for i in range(0, getNumberOfEntries(theJSON)):
        if xcen[i] > xmax:
            xmax = xcen[i]
    return round(xmax + 150, -1)

def setXMin(theJSON):
    xmin = xcen[0]
    for i in range(0, getNumberOfEntries(theJSON)):
        if xcen[i] < xmin:
            xmin = xcen[i]
    return round(xmin - 150, -1)

def setYMax(theJSON):
    ymax = ycen[0]
    for i in range(0, getNumberOfEntries(theJSON)):
        if ycen[i] > ymax:
            ymax = ycen[i]
    return round(ymax + 150, -1)

def setYMin(theJSON):
    ymin = ycen[0]
    for i in range(0, getNumberOfEntries(theJSON)):
        if ycen[i] < ymin:
            ymin = ycen[i]
    return round(ymin - 150, -1)

def sort():
    for i in range(len(dateAndTime)):
        for j in range(len(dateAndTime) - 1, i, -1):
            if dateAndTime[j] < dateAndTime[j - 1]:
                temp1 = dateAndTime[j]
                dateAndTime[j] = dateAndTime[j - 1]
                dateAndTime[j - 1] = temp1
                temp2 = xcen[j]
                xcen[j] = xcen[j - 1]
                xcen[j - 1] = temp2
                temp3 = ycen[j]
                ycen[j] = ycen[j - 1]
                ycen[j - 1] = temp3
                temp4 = xfov[j]
                xfov[j] = xcen[j - 1]
                xfov[j - 1] = temp4
                temp5 = yfov[j]
                yfov[j] = ycen[j - 1]
                yfov[j - 1] = temp5
                temp6 = sciObj[j]
                sciObj[j] = sciObj[j - 1]
                sciObj[j - 1] = temp6

def createAnnotations(theJSON):
    annotations = []
    for i in range(getNumberOfEntries(theJSON)):
        annotations.append('(' + str(xcen[i]) + ', ' + str(ycen[i]) + ')')
    return annotations

def fixAnnotations(annotations):
    texts = []
    for xt, yt, s in zip(xcen, ycen, annotations):
        texts.append(plt.text(xt, yt, s))
    return texts

def plot(theJSON):
    fig, ax = plt.subplots(figsize=(30, 20))
    circle = Circle((0, 0), 980, facecolor='none', edgecolor=(0, 0.8, 0.8), linewidth=3, alpha=0.5)
    ax.add_patch(circle)
    plt.plot(xcen, ycen, color="red")
    plt.plot(xcen, ycen, 'ro', color='blue')
    plt.xlim([setXMin(theJSON), setXMax(theJSON)])
    plt.ylim([setYMin(theJSON), setYMax(theJSON)])
    ax.set_xticks(np.arange(setXMin(theJSON), setXMax(theJSON), 50))
    ax.set_yticks(np.arange(setYMin(theJSON), setYMax(theJSON), 50))
    for i in range(getNumberOfEntries(theJSON)):
        if xfov[i] != 0:
            xStart = xcen[i] - xfov[i] / 20
            yStart = ycen[i] - yfov[i] / 20
            ax.add_patch(Rectangle((xStart, yStart), xfov[i] / 10, yfov[i] / 10, facecolor='none'))
    texts = fixAnnotations(createAnnotations(theJSON))
    f = interpolate.interp1d(xcen, ycen)
    x = np.linspace(min(xcen), max(ycen), 1000)
    y = f(x)
    adjust_text(texts, x, y, arrowprops=dict(arrowstyle="->", color='r', lw=2.0), autoalign='y',
                only_move={'points': 'y', 'text': 'y'}, expand_points=(1.2, 1.4), force_points=0.40)
    plt.grid()
    plt.show()
    main(False)

def searchOnceMore(searchAgain):
    if searchAgain == True:
        noaaNmbr = input('Enter desired active region: ')
        return noaaNmbr
    else:
        continueSearch = input('Would you like to search again? (yes/no) ')
        if continueSearch == 'yes':
            noaaNmbr = input('Enter desired active region: ')
            return noaaNmbr
        elif continueSearch == 'no':
            sys.exit(0)
        else:
            print('please enter "yes" or "no"')
            searchOnceMore(False)

def main(searchAgain):
    noaaNmbr = searchOnceMore(searchAgain)
    urlData = "http://www.lmsal.com/hek/hcr?cmd=search-events3&outputformat=json&instrument=IRIS&noaanum=" + noaaNmbr + "&hasData=true"
    webUrl = urlopen(urlData)
    counter = 0
    if webUrl.getcode() == 200:
        data = webUrl.read().decode('utf-8')
        theJSON = json.loads(data)
        getInfo(counter, theJSON)
    else:
        print("You done messed up!!!")
    sort()
    for i in range(getNumberOfEntries(theJSON)):
        print(dateAndTime[i])
        print("(", xcen[i], ", ", ycen[i], ")")
        print(sciObj[i])
        print(' ')
    if getNumberOfEntries(theJSON) != 0:
        plot(theJSON)
    else:
        print('No observations for active region ' + noaaNmbr)
        main()

main(True)
I have also used Python and would suggest John Zelle's graphics library: http://mcsp.wartburg.edu/zelle/python/
It's much easier to understand and use, in my opinion.
To open a new graph window:
from graphics import GraphWin

win1 = GraphWin("Graph Window 1", 100, 100)
win2 = GraphWin("Graph Window 2", 100, 150)
You can also open the graphics.py file itself to understand how it works; it might help you figure out how to open a graph window your own way. I only know how to open a new graph window through this file, sorry. I hope it helps anyway!