I have this code, but the report it generates is not intuitive when several data sources are combined in one visualization. What changes should I make so that the graph shows a scale to measure the interactions, or some way to group the values with more interactions?
The current output and the expected output are shown in the attached images.
# ------------------------------------------------------------
# packages
# ------------------------------------------------------------
import numpy as np
import pandas as pd
import os
import glob
import re
import matplotlib.pyplot as plt
import networkx as nx
# ------------------------------------------------------------
# Functions
# ------------------------------------------------------------
from readidlist import readIdList
from extrafuns import *
# ------------------------------------------------------------
def getgrapho():
    # reading the list of researcher IDs and names
df_idlist = readIdList()
# df_idlist['ID_LATTES'] = df_idlist['ID_LATTES'].apply(ss)
# config_file = open('./config.txt', 'r')
config_file = open('./config.txt', 'r', encoding='utf-8')
yyi = config_file.readlines()[5].split(':')[1]
yyi = yyi.rstrip('\n')
yyi = yyi.strip(' ')
yyi = float(yyi)
config_file.close()
# config_file = open('./config.txt', 'r')
config_file = open('./config.txt', 'r', encoding='utf-8')
yyf = config_file.readlines()[6].split(':')[1]
yyf = yyf.rstrip('\n')
yyf = yyf.strip(' ')
yyf = float(yyf)
config_file.close()
# ------------------------------------------------------------
    # importing the data frames generated by gettidy
# ------------------------------------------------------------
dfppe_uniq = pd.read_csv('./csv_producao/projetos_uniq.csv',
header=0)
dfpaper = pd.read_csv('./csv_producao/periodicos_all.csv',
header=0)
dfpaper_uniq = pd.read_csv('./csv_producao/periodicos_uniq.csv',
header=0)
# paper uniq
dfpaper['ID'] = dfpaper['ID'].apply(ss)
dfpaper_uniq['ID'] = dfpaper_uniq['ID'].apply(ss)
    # filtering by year
    # projects
dfppe_uniq['YEAR_INI'] = dfppe_uniq['YEAR_INI'].replace('VAZIO', -99)
num99 = dfppe_uniq[dfppe_uniq['YEAR_INI'] == -99]
if len(num99) >= 1:
print('------------------------------------------------------------')
        print('WARNING: ' + str(len(num99)) + ' projects without a start year')
print('------------------------------------------------------------')
dfppe_uniq['YEAR_INI'] = dfppe_uniq['YEAR_INI'].apply(ff)
dfppe_uniq = dfppe_uniq[(dfppe_uniq['YEAR_INI'] >= yyi)]
# ------------------------------------------------------------
    # journal articles
dfpaper['YEAR'] = dfpaper['YEAR'].replace('VAZIO', -99)
dfpaper_uniq['YEAR'] = dfpaper_uniq['YEAR'].replace('VAZIO', -99)
num99 = dfpaper[dfpaper['YEAR'] == -99]
if len(num99) >= 1:
print('------------------------------------------------------------')
        print('WARNING: ' + str(len(num99)) + ' articles without a publication year')
print('------------------------------------------------------------')
dfpaper['YEAR'] = dfpaper['YEAR'].apply(ff)
dfpaper_uniq['YEAR'] = dfpaper_uniq['YEAR'].apply(ff)
dfpaper = dfpaper[(dfpaper['YEAR'] >= yyi) & (dfpaper['YEAR'] <= yyf)]
dfpaper_uniq = dfpaper_uniq[(dfpaper_uniq['YEAR']
>= yyi) & (dfpaper_uniq['YEAR'] <= yyf)]
# ------------------------------------------------------------
    # sorting by year (ascending)
dfppe_uniq_pesq = dfppe_uniq[dfppe_uniq['NATUREZA'] == 'PESQUISA']
dfppe_uniq_pesq = dfppe_uniq_pesq.sort_values(['YEAR_INI'])
dfppe_uniq_ext = dfppe_uniq[dfppe_uniq['NATUREZA'] == 'EXTENSAO']
dfppe_uniq_ext = dfppe_uniq_ext.sort_values(['YEAR_INI'])
dfpaper = dfpaper.sort_values(['YEAR'])
dfpaper_uniq = dfpaper_uniq.sort_values(['YEAR'])
# ------------------------------------------------------------
    # loading the df with personal data
lscsv_fullname = glob.glob('./csv_producao/*fullname.csv')
    # df with full name, last name and id
dffullname = pd.DataFrame()
for i in range(len(lscsv_fullname)):
a = pd.read_csv(lscsv_fullname[i], header=0, dtype='str')
        dffullname = pd.concat([dffullname, a], ignore_index=True)  # DataFrame.append was removed in pandas 2.x
    # converting ID to string so it can be compared with dfpaper
dffullname['ID'] = dffullname['ID'].apply(ss)
dffullname = dffullname.reset_index(drop=True)
    # checking journal-article interactions between members
lsid = []
lsid_tocompare = []
lsinter_qtd = []
for m in range(len(df_idlist)):
idd = str(df_idlist.iloc[m, 0])
lname = dffullname[dffullname['ID'] == idd]
lname = lname.iloc[0, 1]
lname = lname.upper()
# lname = lname.split(';')
# print(lname)
dfids_tocompare = dffullname[dffullname['ID'] != str(idd)]
for n in range(len(dfids_tocompare)):
idd_tocompare = dfids_tocompare.iloc[n, 0]
dd = dfpaper[dfpaper['ID'] == idd_tocompare]
lsid.append(str(idd))
lsid_tocompare.append(idd_tocompare)
            # DANGER ATTENTION FIX: lname should be the full name
            # removing unnecessary characters
interac = 0
for o in range(len(dd)):
authors = dd['AUTHOR'].iloc[o].upper()
authors = authors.replace('[', '')
authors = authors.replace(']', '')
authors = authors.replace("'", '')
authors = authors.split(',')
# print(authors)
for op in range(len(authors)):
# print(authors[op])
if len(authors[op]) > 0:
if authors[op][0] == ' ':
authors[op] = authors[op][1:]
# interac = 0
inpaper = list(set([lname]) & set(authors))
if len(inpaper) >= 1:
interac = interac + 1
# print(interac)
# print(lname)
# print(authors)
lsinter_qtd.append(interac)
dfinterac = pd.DataFrame({'IDD': lsid,
'IDD_COMP': lsid_tocompare,
'WEIGHT': lsinter_qtd})
    # data frame for researchers with no journal interactions
lsnointer_period = []
for m in range(len(df_idlist)):
aano = dfinterac[dfinterac['IDD'] == df_idlist.iloc[m, 0]]
aasum = aano['WEIGHT'].sum()
aano_a = dfinterac[dfinterac['IDD_COMP'] == df_idlist.iloc[m, 0]]
aasum_a = aano_a['WEIGHT'].sum()
if aasum == 0 and aasum_a == 0:
nointer = dffullname[dffullname['ID'] ==
df_idlist.iloc[m, 0]].reset_index(drop=True)
nointer = nointer.iloc[0, 1]
lsnointer_period.append(nointer)
dfnointerac = pd.DataFrame({'NOME': lsnointer_period})
dfnointerac.to_csv('./csv_producao/periodicos_nointer.csv',
index=False, sep=',')
# DANGER ATTENTION
# dfinterac.to_csv('test.csv', index=False)
    # removing rows with no interaction
indexremove = []
for i in range(len(lsid)):
if lsinter_qtd[i] == 0:
indexremove.append(i)
for index in sorted(indexremove, reverse=True):
del lsid[index]
del lsid_tocompare[index]
del lsinter_qtd[index]
# ------------------------------------------------------------
    # Graph
plt.figure(figsize=(12, 9.5))
G = nx.Graph()
for i in range(len(lsid)):
G.add_edge(lsid[i],
lsid_tocompare[i],
                   weight=lsinter_qtd[i])  # use the i-th weight, not always the first one
pos = nx.spring_layout(G, 1.75)
# colors for nodes
colours = ['#5a7d9a', 'red', 'green', 'yellow',
'gray', 'orange', 'blue', 'magenta',
'#00555a', '#f7d560', 'cyan', '#b6b129',
'#a1dd72', '#d49acb', '#d4a69a', '#977e93',
'#a3cc72', '#c60acb', '#d4b22a', '#255e53',
'#77525a', '#c7d511', '#c4c22b', '#c9b329',
'#c8dd22', '#f75acb', '#b1a40a', '#216693',
'#b1cd32', '#b33acb', '#c9a32b', '#925e11',
'#c5dd39', '#d04205', '#d8a82a', '#373e29']
lsgroup_uniq = df_idlist['GROUP'].unique()
dic_colours = {}
for i in range(len(lsgroup_uniq)):
dic_colours[lsgroup_uniq[i]] = colours[i]
a = list(G.nodes())
node_colours = []
for i in range(len(a)):
x = df_idlist[df_idlist['ID_LATTES'] == a[i]]
x = x.iloc[0, 2]
c = dic_colours[x]
node_colours.append(c)
# nodes
nx.draw_networkx_nodes(G, pos,
node_size=400,
node_shape='o',
node_color=node_colours,
alpha=0.7)
# labels
nn = list(G.nodes)
diclabel = {}
for i in range(len(nn)):
x = df_idlist[df_idlist['ID_LATTES'] == nn[i]]
xid = x.iloc[0, 0]
xname = x.iloc[0, 1]
diclabel[str(xid)] = xname
# edges
nx.draw_networkx_edges(G, pos, # edgelist=lsinter_qtd,
width=1, edge_color='orange')
# labels
nx.draw_networkx_labels(G, pos, labels=diclabel, font_size=16,
font_family='sans-serif')
plt.axis('off')
plt.tight_layout()
plt.savefig('./relatorio/figures/grapho.png')
# plt.show()
# ------------------------------------------------------------
# ------------------------------------------------------------
    # With weights
# ------------------------------------------------------------
# G = nx.Graph()
# for i in range(len(lsid)):
# G.add_edge(lsid[i], lsid_tocompare[i], weight=lsinter_qtd[0])
# # elarge = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] > 3]
# # esmall = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] <= 3]
# pos = nx.spring_layout(G) # positions for all nodes
# # nodes
# nx.draw_networkx_nodes(G, pos,
# node_size=400,
# node_shape='o',
# node_color=node_colours)
# # label = lsinter_qtd)
# # edges
# nx.draw_networkx_edges(G, pos, # edgelist=lsinter_qtd,
# width=1, edge_color='orange')
# # nx.draw_networkx_edges(G, pos, edgelist=elarge,
# # width=1, edge_color='orange')
# # nx.draw_networkx_edges(G, pos, edgelist=esmall,
# # width=1, arrowsize=30, alpha=0.5,
# # edge_color='b', style='dashed')
# # labels
# nx.draw_networkx_labels(G, pos, labels=diclabel,
# font_size=14, font_family='sans-serif')
# plt.axis('off')
# plt.show()
# ------------------------------------------------------------
Is there any solution to this problem?
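One way to get both a measuring scale and a visible grouping of the stronger links is to map each edge weight to its width and colour, and each node's weighted degree to its size, then add a colorbar as the scale. The following is only a sketch, not the original script: it assumes lsid, lsid_tocompare and lsinter_qtd were built exactly as above (one entry per pair, weight = number of shared papers), and the colorbar label is a placeholder.

import matplotlib.pyplot as plt
import networkx as nx

plt.figure(figsize=(12, 9.5))
G = nx.Graph()
for i in range(len(lsid)):
    G.add_edge(lsid[i], lsid_tocompare[i], weight=lsinter_qtd[i])
pos = nx.spring_layout(G, k=1.75, seed=42)

# node size proportional to the total number of interactions (weighted degree)
strength = dict(G.degree(weight='weight'))
node_sizes = [200 + 80 * strength[n] for n in G.nodes()]

# edge width and colour proportional to the weight
weights = [d['weight'] for (_, _, d) in G.edges(data=True)]
edge_collection = nx.draw_networkx_edges(G, pos,
                                         width=[0.5 + w for w in weights],
                                         edge_color=weights,
                                         edge_cmap=plt.cm.viridis)
nx.draw_networkx_nodes(G, pos, node_size=node_sizes,
                       node_color='#5a7d9a', alpha=0.7)
nx.draw_networkx_labels(G, pos, font_size=10)
plt.colorbar(edge_collection, label='shared papers')  # the measuring scale
plt.axis('off')
plt.tight_layout()
plt.savefig('./relatorio/figures/grapho.png')

The colorbar acts as the scale, and the larger, more strongly connected nodes stand out as the group with more interactions.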
Related
I trained my model with Mask R-CNN and now I need to test it. How can I extract AP and AR and plot them? I know how to plot with matplotlib, but I need to plot a precision-recall curve, and for that I don't know how to access the AP and AR values. Where are they saved?
I'm using this coco_eval script. In the summarize function I see print("IoU metric: {}".format(iou_type)), and in my output I get that line followed by the AP and AR results, but I can't find where they are computed in this code. Where is this calculation done?
coco_eval.py
import json
import tempfile
import numpy as np
import copy
import time
import torch
import torch._six
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
from collections import defaultdict
import utils
class CocoEvaluator(object):
def __init__(self, coco_gt, iou_types):
assert isinstance(iou_types, (list, tuple))
coco_gt = copy.deepcopy(coco_gt)
self.coco_gt = coco_gt
self.iou_types = iou_types
self.coco_eval = {}
for iou_type in iou_types:
self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)
self.img_ids = []
self.eval_imgs = {k: [] for k in iou_types}
def update(self, predictions):
img_ids = list(np.unique(list(predictions.keys())))
self.img_ids.extend(img_ids)
for iou_type in self.iou_types:
results = self.prepare(predictions, iou_type)
coco_dt = loadRes(self.coco_gt, results) if results else COCO()
coco_eval = self.coco_eval[iou_type]
coco_eval.cocoDt = coco_dt
coco_eval.params.imgIds = list(img_ids)
img_ids, eval_imgs = evaluate(coco_eval)
self.eval_imgs[iou_type].append(eval_imgs)
def synchronize_between_processes(self):
for iou_type in self.iou_types:
self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])
def accumulate(self):
for coco_eval in self.coco_eval.values():
coco_eval.accumulate()
def summarize(self):
for iou_type, coco_eval in self.coco_eval.items():
print("IoU metric: {}".format(iou_type))
coco_eval.summarize()
def prepare(self, predictions, iou_type):
if iou_type == "bbox":
return self.prepare_for_coco_detection(predictions)
elif iou_type == "segm":
return self.prepare_for_coco_segmentation(predictions)
elif iou_type == "keypoints":
return self.prepare_for_coco_keypoint(predictions)
else:
raise ValueError("Unknown iou type {}".format(iou_type))
def prepare_for_coco_detection(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
return coco_results
def prepare_for_coco_segmentation(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
scores = prediction["scores"]
labels = prediction["labels"]
masks = prediction["masks"]
masks = masks > 0.5
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
rles = [
mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"segmentation": rle,
"score": scores[k],
}
for k, rle in enumerate(rles)
]
)
return coco_results
def prepare_for_coco_keypoint(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
keypoints = prediction["keypoints"]
keypoints = keypoints.flatten(start_dim=1).tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
'keypoints': keypoint,
"score": scores[k],
}
for k, keypoint in enumerate(keypoints)
]
)
return coco_results
def convert_to_xywh(boxes):
xmin, ymin, xmax, ymax = boxes.unbind(1)
return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
def merge(img_ids, eval_imgs):
all_img_ids = utils.all_gather(img_ids)
all_eval_imgs = utils.all_gather(eval_imgs)
merged_img_ids = []
for p in all_img_ids:
merged_img_ids.extend(p)
merged_eval_imgs = []
for p in all_eval_imgs:
merged_eval_imgs.append(p)
merged_img_ids = np.array(merged_img_ids)
merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)
# keep only unique (and in sorted order) images
merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
merged_eval_imgs = merged_eval_imgs[..., idx]
return merged_img_ids, merged_eval_imgs
def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
img_ids, eval_imgs = merge(img_ids, eval_imgs)
img_ids = list(img_ids)
eval_imgs = list(eval_imgs.flatten())
coco_eval.evalImgs = eval_imgs
coco_eval.params.imgIds = img_ids
coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
#################################################################
# From pycocotools, just removed the prints and fixed
# a Python3 bug about unicode not defined
#################################################################
# Ideally, pycocotools wouldn't have hard-coded prints
# so that we could avoid copy-pasting those two functions
def createIndex(self):
# create index
# print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns, catToImgs = defaultdict(list), defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
# print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
maskUtils = mask_util
def loadRes(self, resFile):
"""
Load result file and return a result api object.
Args:
self (obj): coco object with ground truth annotations
resFile (str): file name of result file
Returns:
res (obj): result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
# print('Loading and preparing results...')
# tic = time.time()
if isinstance(resFile, torch._six.string_classes):
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
assert type(anns) == list, 'results in not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id + 1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
if 'segmentation' not in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2] * bb[3]
ann['id'] = id + 1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = maskUtils.area(ann['segmentation'])
if 'bbox' not in ann:
ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
ann['id'] = id + 1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x2 - x1) * (y2 - y1)
ann['id'] = id + 1
ann['bbox'] = [x1, y1, x2 - x1, y2 - y1]
# print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset['annotations'] = anns
createIndex(res)
return res
def evaluate(self):
'''
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
:return: None
'''
# tic = time.time()
# print('Running per image evaluation...')
p = self.params
# add backward compatibility if useSegm is specified in params
if p.useSegm is not None:
p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
# print('Evaluate annotation type *{}*'.format(p.iouType))
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params = p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == 'segm' or p.iouType == 'bbox':
computeIoU = self.computeIoU
elif p.iouType == 'keypoints':
computeIoU = self.computeOks
self.ious = {
(imgId, catId): computeIoU(imgId, catId)
for imgId in p.imgIds
for catId in catIds}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
evalImgs = [
evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
# this is NOT in the pycocotools code, but could be done outside
evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
self._paramsEval = copy.deepcopy(self.params)
# toc = time.time()
# print('DONE (t={:0.2f}s).'.format(toc-tic))
return p.imgIds, evalImgs
#################################################################
# end of straight copy from pycocotools, just removing the prints
#################################################################
And this is my code for evaluation:
def evaluate(model, data_loader, device):
n_threads = torch.get_num_threads()
# FIXME remove this and make paste_masks_in_image run on the GPU
torch.set_num_threads(1)
cpu_device = torch.device("cpu")
model.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
coco = get_coco_api_from_dataset(data_loader.dataset)
iou_types = _get_iou_types(model)
coco_evaluator = CocoEvaluator(coco, iou_types)
for images, targets in metric_logger.log_every(data_loader, 100, header):
images = list(img.to(device) for img in images)
if torch.cuda.is_available():
torch.cuda.synchronize()
model_time = time.time()
outputs = model(images)
outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
model_time = time.time() - model_time
res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
evaluator_time = time.time()
coco_evaluator.update(res)
evaluator_time = time.time() - evaluator_time
metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
coco_evaluator.synchronize_between_processes()
# accumulate predictions from all images
coco_evaluator.accumulate()
coco_evaluator.summarize()
torch.set_num_threads(n_threads)
return coco_evaluator
These are the results I got (the AP and AR summary printed by summarize()).
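The AP and AR values themselves are computed inside pycocotools: COCOeval.accumulate() fills coco_eval.eval, and COCOeval.summarize() reduces that to the printed table and stores the 12 numbers in coco_eval.stats; the script above only wraps those calls. So after coco_evaluator.accumulate() and coco_evaluator.summarize() have run, everything can be read back from the COCOeval objects. A sketch, assuming coco_evaluator is the object returned by the evaluate() function above and that 'bbox' is one of your IoU types:

import numpy as np
import matplotlib.pyplot as plt

coco_eval = coco_evaluator.coco_eval['bbox']   # use 'segm' for the mask metrics

# the 12 numbers printed by summarize():
# stats[0] = AP@[.50:.95], stats[1] = AP@.50, stats[2] = AP@.75,
# stats[6], stats[7], stats[8] = AR for maxDets = 1, 10, 100, ...
print(coco_eval.stats)

# accumulate() also stores the full precision table with shape
# [IoU thresholds, recall thresholds, classes, area ranges, maxDets]
precision = coco_eval.eval['precision']
rec_thrs = coco_eval.params.recThrs            # 101 recall points in [0, 1]

# precision-recall curve at IoU = 0.5, area = 'all', maxDets = 100
pr = precision[0, :, :, 0, -1]
pr = np.where(pr > -1, pr, np.nan)             # -1 marks classes with no ground truth
plt.plot(rec_thrs, np.nanmean(pr, axis=1))
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision-recall curve (IoU = 0.5)')
plt.show()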
I am a physics student trying to simulate the 2D Ising model using the Metropolis algorithm in Python. I wanted to watch the time evolution with imshow() and used the code below, but the program outputs a blank figure. What do I do? Thank you for your help!
n = 50
ims =[]
def lattice(p):
init_random = np.random.random((n,n))
d = np.zeros((n,n))
d[init_random>=p] = 1
d[init_random<p] = -1
return d
def energy(lattice):
kern = generate_binary_structure(2, 1)
kern[1][1] = False
e = -lattice * convolve(lattice, kern, mode='constant', cval=0)
return e.sum()
def metropolis(s, b, t, e):
s = s.copy()
for t in range(0,t-1):
x = np.random.randint(0,n)
y = np.random.randint(0,n)
si = s[x,y]
sf = -1*si
ei = 0
ef = 0
if x > 0:
ei += -si*s[x-1,y]
ef += -sf*s[x-1,y]
if x < n-1:
ei += -si*s[x+1,y]
ef += -sf*s[x+1,y]
if y > 0:
ei += -si*s[x,y-1]
ef += -sf*s[x,y-1]
if y < n-1:
ei += -si*s[x,y+1]
ef += -sf*s[x,y+1]
de = ef - ei
if (de>0)*(np.random.random() < np.exp(-b*de)):
s[x,y] = sf
elif (de<=0):
s[x,y] = sf
im = plt.imshow(s, animated = True)
ims.append([im])
return ims
u = lattice(0.25)
e = energy(u)
fig = plt.figure(figsize=(6,6))
ims = metropolis(u, 0.7, 100, e)
ani = animation.ArtistAnimation(fig, ims, interval = 50, blit = True, repeat_delay = 100)
plt.show()
I only needed to add the imports and change the energy function (specifically the e = line). It works nicely on my system.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from scipy import ndimage
n = 50
ims =[]
def lattice(p):
init_random = np.random.random((n,n))
d = np.zeros((n,n))
d[init_random>=p] = 1
d[init_random<p] = -1
return d
def energy(lattice):
kern = ndimage.generate_binary_structure(2, 1)
kern[1][1] = False
e = -lattice * ndimage.convolve(lattice, kern, mode='constant', cval=1)
return e.sum()
def metropolis(s, b, t, e):
s = s.copy()
for t in range(0,t-1):
x = np.random.randint(0,n)
y = np.random.randint(0,n)
si = s[x,y]
sf = -1*si
ei = 0
ef = 0
if x > 0:
ei += -si*s[x-1,y]
ef += -sf*s[x-1,y]
if x < n-1:
ei += -si*s[x+1,y]
ef += -sf*s[x+1,y]
if y > 0:
ei += -si*s[x,y-1]
ef += -sf*s[x,y-1]
if y < n-1:
ei += -si*s[x,y+1]
ef += -sf*s[x,y+1]
de = ef - ei
if (de>0)*(np.random.random() < np.exp(-b*de)):
s[x,y] = sf
elif (de<=0):
s[x,y] = sf
im = plt.imshow(s, animated = True)
ims.append([im])
return ims
u = lattice(0.25)
e = energy(u)
fig = plt.figure(figsize=(6,6))
ims = metropolis(u, 0.7, 100, e)
ani = animation.ArtistAnimation(fig, ims, interval = 50, blit = True, repeat_delay = 100)
plt.show()
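If the window still comes up blank on some backends, one extra thing worth checking is that the ani object is kept alive (the animation is garbage-collected otherwise); writing the frames to a file is also a quick way to verify them. A one-liner like this (the filename is arbitrary) should work with the code above:

ani.save('ising.gif', writer='pillow', fps=20)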
I wrote an MPC in Python and it worked before. After a long time I wanted to use it again, but now I get this error:
f0 passed has more than 1 dimension.
But I didn't change anything in my code, which is strange.
Here is my code:
import numpy as np
import numpy.linalg as npl
import matplotlib.pyplot as plt
from scipy.optimize import minimize
def mpcAugment(Am, Bm, Cm ):
"Function for Augmented Model"
nx, nu = Bm.shape
ny = Cm.shape[0]
A = np.zeros((nx+ny,nx+ny))
A[0:nx,0:nx] = Am
    A[nx:nx+ny,0:nx] = Cm@Am
A[nx:nx+ny,nx:nx+ny] = np.eye(ny)
B = np.zeros((nx+ny,nu))
B[0:nx,:nu] = Bm
    B[nx:nx+ny,:nu] = Cm@Bm
C = np.zeros((ny,nx+ny))
C[:ny,nx:nx+ny] = np.eye(ny)
return A, B, C
'Define Parameters'
k = 0.4
AICB = 153.8
mcp = 8.8e4
vamb1 = 30
vamb2 = 45
a = -k*AICB/mcp
b = -1/mcp
Ts = 20
VICBref = -5.0
Am = np.array([[1+Ts*a]])
Bm = np.array([[Ts*b]])
Gm = np.array([[-Ts*a]])
Cm = np.array([[1]])
A, B, C = mpcAugment(Am,Bm,Cm)
A, G, C = mpcAugment(Am,Gm,Cm)
nx, nu = B.shape
ny = C.shape[0]
nd = G.shape[1]
Np = 20
Nu = 5
F = np.zeros((Np*ny,nx))
PHI = np.zeros((Np*ny,Nu*nu))
PHIw = np.zeros((Np*ny,Np*nd))
for i in range(0,Np):
Ai = npl.matrix_power(A, i+1)
    F[i*ny:(i+1)*ny,:] = C@Ai
for j in range(0, Nu):
if j <= i:
Aij = np.linalg.matrix_power(A, i-j)
            PHI[i*ny:(i+1)*ny, j*nu:(j+1)*nu] = C@Aij@B
for j in range(0, Np):
if j <= i:
Aij = np.linalg.matrix_power(A, i-j)
            PHIw[i*ny:(i+1)*ny, j*nd:(j+1)*nd] = C@Aij@G
umax = 3100
umin = 0
Q = np.eye(Np*ny)
R = 1e-2*np.eye(Nu*nu)
Rs = VICBref*np.ones((Np*ny,1))
Ainq = np.zeros((2*Nu*nu,Nu*nu))
binq = np.zeros((2*Nu*nu,1))
cinq = np.zeros((2*Nu*nu,1))
for i in range(0,Nu):
binq[i*nu:(i+1)*nu] = umax
binq[(i+Nu)*nu:(Nu+i+1)*nu] = 1
cinq[i*nu:(i+1)*nu] = 1
cinq[(i+Nu)*nu:(Nu+i+1)*nu] = -1
for j in range(0,i+1):
Ainq[i*nu:(i+1)*nu,j*nu:(j+1)*nu] = np.eye(nu)
Ainq[(i+Nu)*nu:(Nu+i+1)*nu,j*nu:(j+1)*nu] = np.eye(nu)
u0 = 0
def objective(du):
dU = np.array(du).reshape((len(du),1))
    Y = F@x + PHI@dU + PHIw@w
    return np.transpose((Rs-Y))@(Rs-Y)+np.transpose(dU)@R@(dU)
def constraint1(du):
dU = np.array(du).reshape((len(du),1))
    return (binq - Ainq@dU - cinq*u0)[0]
#print(objective([1,1,1]))
ulim = (umin, umax)
bnds = np.kron(np.ones((Nu,1)),ulim)
#print(bnds)
Um = np.ones((nu*Nu,1))
Tsim = 5e4
time = np.arange(0,Tsim,Ts)
Nt = len(time)
xm = np.zeros((Nt,1))
um = np.zeros((Nt,nu))
ym = np.zeros((Nt,ny))
xm[0] = 0
ym[0] = Cm.dot(xm[0])
w = np.zeros((Np*nd,1))
print('Am = ',Am)
print('Bm = ',Bm)
print('Cm = ',Cm)
x = np.zeros((nx,1))
x[1] = xm[0]
vamb = vamb1
Vamb = np.zeros((Nt,1))
Ns = int(np.floor(Nt/2))
Vamb[0:Ns] = vamb1*np.ones((Ns,1))
Vamb[Ns:Nt] = vamb2*np.ones((Nt-Ns,1))
Vref = VICBref*np.ones((Nt,1))
con = {'type':'ineq','fun':constraint1}
for i in range(0,Nt-1):
sol = minimize(objective, Um, method = 'SLSQP',constraints = con)
if sol.success == False:
print('Error Cant solve problem')
exit()
Um = sol.x
um[i+1] = um[i] + Um[0]
u0 = um[i+1]
xm[i+1] = Am.dot(xm[i])+Bm.dot(um[i+1])+Gm.dot(Vamb[i])
ym[i+1] = Cm.dot(xm[i+1])
for j in range(0,Np):
if i+j < Nt:
Rs[j] = Vref[i+j]
w[j] = Vamb[i+j]-Vamb[i+j-1]
else:
Rs[j] = Vref[Nt-1]
w[j] = 0
x[0] = xm[i+1] - xm[i]
x[1] = xm[i+1]
print('Q = ',um[i+1],' , VICB = ',xm[i+1], ' vamb = ', Vamb[i])
hour = 60*60
plt.figure()
plt.subplot(2,1,1)
plt.plot(time/hour,ym)
plt.plot(time/hour,Vref,'--')
plt.xlabel('time(hours)')
plt.xlim([0, Tsim/hour])
plt.subplot(2,1,2)
plt.plot(time/hour,um)
plt.xlim([0, Tsim/hour])
plt.show()
It's about a controller which controls the temperature of a cool box.
Is it possible that something changed underneath my code even though I didn't touch it?
I think the problem is now in the minimization part.
I reinstalled all of my libraries and it worked.
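For what it's worth, the usual trigger for "f0 passed has more than 1 dimension" in newer SciPy releases is an objective function that returns a 1x1 array instead of a scalar, which is exactly what the matrix expression in objective() produces; reinstalling probably pulled in a SciPy version that still tolerated it. A minimal sketch of a variant that should also satisfy current SciPy is to collapse the cost to a scalar:

def objective(du):
    dU = np.array(du).reshape((len(du), 1))
    Y = F@x + PHI@dU + PHIw@w
    cost = np.transpose(Rs - Y)@(Rs - Y) + np.transpose(dU)@R@dU  # 1x1 array
    return cost.item()  # minimize() expects a plain scalar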
Previously I created a lot of Python objects of class A. Now I would like to add a new method, plotting_in_PC_space_with_coloring_option() (its purpose is to plot some data stored in the object), to class A, and call it on those old objects.
An example is:
import copy
import numpy as np
from math import *
from pybrain.structure import *
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.datasets.supervised import SupervisedDataSet
import pickle
import neural_network_related
class A(object):
"""the neural network for simulation"""
'''
todo:
- find boundary
- get_angles_from_coefficients
'''
def __init__(self,
index, # the index of the current network
list_of_coor_data_files, # accept multiple files of training data
energy_expression_file, # input, output files
preprocessing_settings = None,
connection_between_layers = None, connection_with_bias_layers = None,
PCs = None, # principal components
):
self._index = index
self._list_of_coor_data_files = list_of_coor_data_files
self._energy_expression_file = energy_expression_file
self._data_set = []
for item in list_of_coor_data_files:
self._data_set += self.get_many_cossin_from_coordiantes_in_file(item)
self._preprocessing_settings = preprocessing_settings
self._connection_between_layers = connection_between_layers
self._connection_with_bias_layers = connection_with_bias_layers
self._node_num = [8, 15, 2, 15, 8]
self._PCs = PCs
def save_into_file(self, filename = None):
if filename is None:
filename = "network_%s.pkl" % str(self._index) # by default naming with its index
with open(filename, 'wb') as my_file:
pickle.dump(self, my_file, pickle.HIGHEST_PROTOCOL)
return
def get_cossin_from_a_coordinate(self, a_coordinate):
num_of_coordinates = len(a_coordinate) / 3
a_coordinate = np.array(a_coordinate).reshape(num_of_coordinates, 3)
diff_coordinates = a_coordinate[1:num_of_coordinates, :] - a_coordinate[0:num_of_coordinates - 1,:] # bond vectors
diff_coordinates_1=diff_coordinates[0:num_of_coordinates-2,:];diff_coordinates_2=diff_coordinates[1:num_of_coordinates-1,:]
normal_vectors = np.cross(diff_coordinates_1, diff_coordinates_2);
normal_vectors_normalized = np.array(map(lambda x: x / sqrt(np.dot(x,x)), normal_vectors))
normal_vectors_normalized_1 = normal_vectors_normalized[0:num_of_coordinates-3, :];normal_vectors_normalized_2 = normal_vectors_normalized[1:num_of_coordinates-2,:];
diff_coordinates_mid = diff_coordinates[1:num_of_coordinates-2]; # these are bond vectors in the middle (remove the first and last one), they should be perpendicular to adjacent normal vectors
cos_of_angles = range(len(normal_vectors_normalized_1))
sin_of_angles_vec = range(len(normal_vectors_normalized_1))
sin_of_angles = range(len(normal_vectors_normalized_1)) # initialization
for index in range(len(normal_vectors_normalized_1)):
cos_of_angles[index] = np.dot(normal_vectors_normalized_1[index], normal_vectors_normalized_2[index])
sin_of_angles_vec[index] = np.cross(normal_vectors_normalized_1[index], normal_vectors_normalized_2[index])
sin_of_angles[index] = sqrt(np.dot(sin_of_angles_vec[index], sin_of_angles_vec[index])) * np.sign(sum(sin_of_angles_vec[index]) * sum(diff_coordinates_mid[index]));
return cos_of_angles + sin_of_angles
def get_many_cossin_from_coordinates(self, coordinates):
return map(self.get_cossin_from_a_coordinate, coordinates)
def get_many_cossin_from_coordiantes_in_file (self, filename):
coordinates = np.loadtxt(filename)
return self.get_many_cossin_from_coordinates(coordinates)
def mapminmax(self, my_list): # for preprocessing in network
my_min = min(my_list)
my_max = max(my_list)
mul_factor = 2.0 / (my_max - my_min)
offset = (my_min + my_max) / 2.0
result_list = np.array(map(lambda x : (x - offset) * mul_factor, my_list))
return (result_list, (mul_factor, offset)) # also return the parameters for processing
def get_mapminmax_preprocess_result_and_coeff(self,data=None):
if data is None:
data = self._data_set
data = np.array(data)
data = np.transpose(data)
result = []; params = []
for item in data:
temp_result, preprocess_params = self.mapminmax(item)
result.append(temp_result)
params.append(preprocess_params)
return (np.transpose(np.array(result)), params)
def mapminmax_preprocess_using_coeff(self, input_data=None, preprocessing_settings=None):
# try begin
if preprocessing_settings is None:
preprocessing_settings = self._preprocessing_settings
temp_setttings = np.transpose(np.array(preprocessing_settings))
result = []
for item in input_data:
item = np.multiply(item - temp_setttings[1], temp_setttings[0])
result.append(item)
return result
# try end
def get_expression_of_network(self, connection_between_layers=None, connection_with_bias_layers=None):
if connection_between_layers is None:
connection_between_layers = self._connection_between_layers
if connection_with_bias_layers is None:
connection_with_bias_layers = self._connection_with_bias_layers
node_num = self._node_num
expression = ""
# first part: network
for i in range(2):
expression = '\n' + expression
mul_coef = connection_between_layers[i].params.reshape(node_num[i + 1], node_num[i])
bias_coef = connection_with_bias_layers[i].params
for j in range(np.size(mul_coef, 0)):
temp_expression = 'layer_%d_unit_%d = tanh( ' % (i + 1, j)
for k in range(np.size(mul_coef, 1)):
temp_expression += ' %f * layer_%d_unit_%d +' % (mul_coef[j, k], i, k)
temp_expression += ' %f);\n' % (bias_coef[j])
expression = temp_expression + expression # order of expressions matter in OpenMM
# second part: definition of inputs
index_of_backbone_atoms = [2, 5, 7, 9, 15, 17, 19];
for i in range(len(index_of_backbone_atoms) - 3):
index_of_coss = i
index_of_sins = i + 4
expression += 'layer_0_unit_%d = (raw_layer_0_unit_%d - %f) * %f;\n' % \
(index_of_coss, index_of_coss, self._preprocessing_settings[index_of_coss][1], self._preprocessing_settings[index_of_coss][0])
expression += 'layer_0_unit_%d = (raw_layer_0_unit_%d - %f) * %f;\n' % \
(index_of_sins, index_of_sins, self._preprocessing_settings[index_of_sins][1], self._preprocessing_settings[index_of_sins][0])
expression += 'raw_layer_0_unit_%d = cos(dihedral_angle_%d);\n' % (index_of_coss, i)
expression += 'raw_layer_0_unit_%d = sin(dihedral_angle_%d);\n' % (index_of_sins, i)
expression += 'dihedral_angle_%d = dihedral(p%d, p%d, p%d, p%d);\n' % \
(i, index_of_backbone_atoms[i], index_of_backbone_atoms[i+1],index_of_backbone_atoms[i+2],index_of_backbone_atoms[i+3])
return expression
def write_expression_into_file(self, out_file = None):
if out_file is None: out_file = self._energy_expression_file
expression = self.get_expression_of_network()
with open(out_file, 'w') as f_out:
f_out.write(expression)
return
def get_mid_result(self, input_data=None, connection_between_layers=None, connection_with_bias_layers=None):
if input_data is None: input_data = self._data_set
if connection_between_layers is None: connection_between_layers = self._connection_between_layers
if connection_with_bias_layers is None: connection_with_bias_layers = self._connection_with_bias_layers
node_num = self._node_num
temp_mid_result = range(4)
mid_result = []
# first need to do preprocessing
for item in self.mapminmax_preprocess_using_coeff(input_data, self._preprocessing_settings):
for i in range(4):
mul_coef = connection_between_layers[i].params.reshape(node_num[i + 1], node_num[i]) # fix node_num
bias_coef = connection_with_bias_layers[i].params
previous_result = item if i == 0 else temp_mid_result[i - 1]
temp_mid_result[i] = np.dot(mul_coef, previous_result) + bias_coef
if i != 3: # the last output layer is a linear layer, while others are tanh layers
temp_mid_result[i] = map(tanh, temp_mid_result[i])
mid_result.append(copy.deepcopy(temp_mid_result)) # note that should use deepcopy
return mid_result
def get_PC_and_save_it_to_network(self):
'''get PCs and save the result into _PCs
'''
mid_result = self.get_mid_result()
self._PCs = [item[1] for item in mid_result]
return
def train(self):
####################### set up autoencoder begin #######################
node_num = self._node_num
in_layer = LinearLayer(node_num[0], "IL")
hidden_layers = [TanhLayer(node_num[1], "HL1"), TanhLayer(node_num[2], "HL2"), TanhLayer(node_num[3], "HL3")]
bias_layers = [BiasUnit("B1"),BiasUnit("B2"),BiasUnit("B3"),BiasUnit("B4")]
out_layer = LinearLayer(node_num[4], "OL")
layer_list = [in_layer] + hidden_layers + [out_layer]
molecule_net = FeedForwardNetwork()
molecule_net.addInputModule(in_layer)
for item in (hidden_layers + bias_layers):
molecule_net.addModule(item)
molecule_net.addOutputModule(out_layer)
connection_between_layers = range(4); connection_with_bias_layers = range(4)
for i in range(4):
connection_between_layers[i] = FullConnection(layer_list[i], layer_list[i+1])
connection_with_bias_layers[i] = FullConnection(bias_layers[i], layer_list[i+1])
molecule_net.addConnection(connection_between_layers[i]) # connect two neighbor layers
molecule_net.addConnection(connection_with_bias_layers[i])
molecule_net.sortModules() # this is some internal initialization process to make this module usable
####################### set up autoencoder end #######################
trainer = BackpropTrainer(molecule_net, learningrate=0.002,momentum=0.4,verbose=False, weightdecay=0.1, lrdecay=1)
data_set = SupervisedDataSet(node_num[0], node_num[4])
sincos = self._data_set
(sincos_after_process, self._preprocessing_settings) = self.get_mapminmax_preprocess_result_and_coeff(data = sincos)
for item in sincos_after_process: # is it needed?
data_set.addSample(item, item)
trainer.trainUntilConvergence(data_set, maxEpochs=50)
self._connection_between_layers = connection_between_layers
self._connection_with_bias_layers = connection_with_bias_layers
print("Done!\n")
return
def create_sge_files_for_simulation(self,potential_centers = None):
if potential_centers is None:
potential_centers = self.get_boundary_points()
neural_network_related.create_sge_files(potential_centers)
return
def get_boundary_points(self, list_of_points = None, num_of_bins = 5):
if list_of_points is None: list_of_points = self._PCs
x = [item[0] for item in list_of_points]
y = [item[1] for item in list_of_points]
temp = np.histogram2d(x,y, bins=[num_of_bins, num_of_bins])
hist_matrix = temp[0]
# add a set of zeros around this region
hist_matrix = np.insert(hist_matrix, num_of_bins, np.zeros(num_of_bins), 0)
hist_matrix = np.insert(hist_matrix, 0, np.zeros(num_of_bins), 0)
hist_matrix = np.insert(hist_matrix, num_of_bins, np.zeros(num_of_bins + 2), 1)
hist_matrix = np.insert(hist_matrix, 0, np.zeros(num_of_bins +2), 1)
hist_matrix = (hist_matrix != 0).astype(int)
sum_of_neighbors = np.zeros(np.shape(hist_matrix)) # number of neighbors occupied with some points
for i in range(np.shape(hist_matrix)[0]):
for j in range(np.shape(hist_matrix)[1]):
if i != 0: sum_of_neighbors[i,j] += hist_matrix[i - 1][j]
if j != 0: sum_of_neighbors[i,j] += hist_matrix[i][j - 1]
if i != np.shape(hist_matrix)[0] - 1: sum_of_neighbors[i,j] += hist_matrix[i + 1][j]
if j != np.shape(hist_matrix)[1] - 1: sum_of_neighbors[i,j] += hist_matrix[i][j + 1]
bin_width_0 = temp[1][1]-temp[1][0]
bin_width_1 = temp[2][1]-temp[2][0]
min_coor_in_PC_space_0 = temp[1][0] - 0.5 * bin_width_0 # multiply by 0.5 since we want the center of the grid
min_coor_in_PC_space_1 = temp[2][0] - 0.5 * bin_width_1
potential_centers = []
for i in range(np.shape(hist_matrix)[0]):
for j in range(np.shape(hist_matrix)[1]):
if hist_matrix[i,j] == 0 and sum_of_neighbors[i,j] != 0: # no points in this block but there are points in neighboring blocks
temp_potential_center = [round(min_coor_in_PC_space_0 + i * bin_width_0, 2), round(min_coor_in_PC_space_1 + j * bin_width_1, 2)]
potential_centers.append(temp_potential_center)
return potential_centers
# this function is added after those old objects of A were created
def plotting_in_PC_space_with_coloring_option(self,
list_of_coordinate_files_for_plotting=None, # accept multiple files
color_option='pure'):
'''
by default, we are using training data, and we also allow external data input
'''
if list_of_coordinate_files_for_plotting is None:
PCs_to_plot = self._PCs
else:
temp_sincos = []
for item in list_of_coordinate_files_for_plotting:
temp_sincos += self.get_many_cossin_from_coordiantes_in_file(item)
temp_mid_result = self.get_mid_result(input_data = temp_sincos)
PCs_to_plot = [item[1] for item in temp_mid_result]
(x, y) = ([item[0] for item in PCs_to_plot], [item[1] for item in PCs_to_plot])
# coloring
if color_option == 'pure':
coloring = 'red'
elif color_option == 'step':
coloring = range(len(x))
fig, ax = plt.subplots()
ax.scatter(x,y, c=coloring)
ax.set_xlabel("PC1")
ax.set_ylabel("PC2")
plt.show()
return
But it seems that plotting_in_PC_space_with_coloring_option() was not bound to those old objects. Is there any way to fix this (I do not want to recreate the objects, since creating them involves CPU-intensive calculations and would take a very long time)?
Thanks!
Something like this:
class A:
    def q(self): print(1)

a = A()

def f(self): print(2)

setattr(A, 'f', f)
a.f()
This is called a monkey patch.
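Applied to this case: pickle stores only the instance state, so an unpickled object uses whatever class definition is loaded at the moment you load it. Once the new function is attached to A (via setattr as above, or simply because it is now part of the class source), both the objects still in memory and the ones reloaded from disk can call it, because method lookup happens on the class at call time. A sketch, assuming the default filename written by save_into_file():

import pickle

# attach the new method to the class; not needed if the class definition
# you are running already contains plotting_in_PC_space_with_coloring_option
setattr(A, 'plotting_in_PC_space_with_coloring_option',
        plotting_in_PC_space_with_coloring_option)

# an object created and pickled before the method existed
with open('network_1.pkl', 'rb') as my_file:
    old_network = pickle.load(my_file)

old_network.plotting_in_PC_space_with_coloring_option()  # resolved via the class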
I need to add some points to an existing plot I make in Chaco. I have tried plot.request_redraw() but it didn't work. What else can I do?
This is the piece of code:
class PlotApp(HasTraits):
plotdata = Instance(ArrayPlotData)
returns_plot = Instance(Plot)
plot_type = Enum('line', 'scatter')
corr_renderer = Any()
x_min = Float()
x_max = Float()
traits_view = View(
VGroup(
HGroup(spring, Label('Click point to select/unselect'),
spring),
#Item('plot_type'),
Item('returns_plot', editor=ComponentEditor(size=size,
bgcolor=bg_color),
show_label=False),
#Item('num_medicion', width=-225),
orientation = "vertical"),
resizable=True, title=title
)
def _create_returns_plot(self):
plot = Plot(self.plotdata)
plot.legend.visible = True
plot.x_axis = None
x_axis = PlotAxis(plot, orientation="bottom")
plot.overlays.append(x_axis)
renderer = plot.plot(("index", "value"), type="scatter",name = "Mediciones")[0]
        # Add all the necessary tools
renderer.tools.append(ScatterInspector(renderer, selection_mode="toggle", persistent_hover=False))
renderer.overlays.append(
ScatterInspectorOverlay(renderer,
hover_color = "transparent",
hover_marker_size = 10,
hover_outline_color = "purple",
hover_line_width = 2,
selection_marker_size = 8,
selection_color = "red")
)
renderer.tools.append(RangeSelection(renderer, left_button_selects = False, disable_left_mouse = True, \
rigth_button_selects = True, \
auto_handle_event = False, metadata_name = "annotations"))
renderer.overlays.append(RangeSelectionOverlay(component=renderer, metadata_name = "annotations"))
renderer.tools.append(PanTool(renderer))
renderer.overlays.append(ZoomTool(renderer, drag_button="right"))
self.index_datasource = renderer.index
self.index_datasource.on_trait_change(self._selections_changed, "metadata_changed")
self.returns_plot = plot
def _create_data(self):
        # generate the data; later I will read it with pandas
npts = 40
x_max = 10
x = np.random.random(npts)
x = x * x_max
error = np.random.random(npts)
y = 2 + 3*x + 5*error
        # This part sorts the elements
x_ordenado = np.array([])
y_ordenado = np.array([])
orden = range(x.size)
nuevo_orden = np.array([])
for i in range(x.size):
arg_min = x.argmin()
x_ordenado = np.append(x_ordenado, x[arg_min])
y_ordenado = np.append(y_ordenado, y[arg_min])
nuevo_orden = np.append(nuevo_orden, orden[arg_min])
x = np.delete(x, arg_min)
y = np.delete(y, arg_min)
orden = np.delete(orden, arg_min)
self.x_ordenado = x_ordenado
self.y_ordenado = y_ordenado
        # Build the data for the plot
plotdata = ArrayPlotData()
plotdata.set_data("index", x_ordenado)
plotdata.set_data("value", y_ordenado)
self.plotdata = plotdata
def _selections_changed(self):
        # Get the manually selected points
self.posicion_puntos_selec = self.index_datasource.metadata.get('selections', ())
        # get the points selected with the rectangle
seleccionado_range = self.index_datasource.metadata.get('annotations', ())
        # When the rectangle selection is cleared, annotations becomes NoneType;
        # this if converts it back to a tuple
type_range = type(self.index_datasource.metadata['annotations'])
if type_range != tuple:
self.index_datasource.metadata['annotations'] = []
else:
self.x_min, self.x_max = seleccionado_range
#on_trait_change("posicion_puntos_selec, x_min, x_max")
def _perform_calculations(self):
plot = self.returns_plot
x_nuevo = np.append(self.x_calcular, [11, 12])
y_nuevo = np.append(self.y_calcular, [11, 12])
self.corr_renderer = plot.plot((x_nuevo, y_nuevo),
type="scatter", color="blue")[0]
plot.request_redraw()
To update the data of an existing plot, the best and simplest approach is to update the existing ArrayPlotData of the Plot instance being displayed. There are listeners inside Chaco that will take care of the redraw. Below is an example inspired by your code:
from traits.api import HasTraits, Enum, Instance, Button
from traitsui.api import View, Item, VGroup
from enable.api import ComponentEditor
from chaco.api import Plot, ArrayPlotData, PlotAxis
from numpy import arange
class PlotApp(HasTraits):
plotdata = Instance(ArrayPlotData)
returns_plot = Instance(Plot)
plot_type = Enum('line', 'scatter')
add_points = Button
traits_view = View(
VGroup(Item("add_points"),
Item('returns_plot', editor=ComponentEditor(),
show_label=False),
orientation = "vertical"),
resizable=True, title="Test"
)
def _returns_plot_default(self):
self.plotdata = ArrayPlotData(index=arange(100), value=arange(100))
plot = Plot(self.plotdata)
plot.legend.visible = True
plot.x_axis = None
x_axis = PlotAxis(plot, orientation="bottom")
plot.overlays.append(x_axis)
plot.plot(("index", "value"), type="scatter", name = "Mediciones")
return plot
def _add_points_fired(self):
current_length = len(self.plotdata["index"])
self.plotdata.set_data("index", arange(current_length+1))
self.plotdata.set_data("value", arange(current_length+1))
if __name__ == "__main__":
app = PlotApp()
app.configure_traits()
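If, as in _perform_calculations from the question, the goal is to append a couple of specific points rather than regenerate the whole series, the same pattern applies; here is a variant of the button handler (the [11, 12] values are just the placeholders from the question):

import numpy as np

def _add_points_fired(self):
    # append the new points to the arrays already held by the ArrayPlotData;
    # Chaco's listeners trigger the redraw automatically
    new_x = np.append(self.plotdata.get_data("index"), [11, 12])
    new_y = np.append(self.plotdata.get_data("value"), [11, 12])
    self.plotdata.set_data("index", new_x)
    self.plotdata.set_data("value", new_y)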
HTH,
Jonathan