National Instruments USB X-Series with Python-nidaqmx

I am trying to drive 4 independent channels using the Python nidaqmx module and the NI X-Series 6341 (PN: 781438-01). I have 2 analogue outputs and 2 digital outputs, and I would like all of these streams to be independent of each other. For some reason, when I execute the code only my 2 analogue outputs and the digital output on line 0 fire; I do not see the digital stream on line 1. Does anyone know what may be going on here? I've tried it with another box and get the same behaviour, so I don't think it's hardware-related. Here is the code:
import nidaqmx
from numpy import array
from nidaqmx import stream_writers
import numpy as np
from tkinter import filedialog
from numpy import genfromtxt
import pandas as pd
from nidaqmx.constants import LineGrouping

Devs = []
system = nidaqmx.system.System.local()
print(system.driver_version)

for device in system.devices:
    dev = str(device).split('=')[1].split(')')[0]
    Devs.append(dev)
    print(device)
    print(dev)

def detectDelimiter(csvFile):
    with open(csvFile, 'r') as myCsvfile:
        header = myCsvfile.readline()
        if header.find(";") != -1:
            return ";"
        if header.find(",") != -1:
            return ","
        if header.find("\t") != -1:
            return "\t"

My_Data = []
My_Data_unscaled = []

def load_data():
    file_path = filedialog.askopenfilename()
    delim = detectDelimiter(file_path)
    my_data = genfromtxt(file_path, delimiter=delim)
    if len(My_Data) > 0:
        print('Deleting Data in the buffer...')
        My_Data.clear()
        My_Data.append(my_data)
        My_Data_unscaled.append(my_data)
    else:
        #original_data = my_data
        #My_Data = []
        My_Data.append(my_data)
        My_Data_unscaled.append(my_data)

load_data()

look = My_Data[0]
e_dataframe = pd.DataFrame(look)

v_step = 20/2**16
e_dataframe[0] = e_dataframe[0]*v_step
e_dataframe[1] = e_dataframe[1]*v_step

samples_new = [e_dataframe[1].T, e_dataframe[0].T]
samples_new = array(samples_new)

dig_samples_new = [e_dataframe[2].T, e_dataframe[2].T]
dig_samples_new = array(dig_samples_new)
dig_samples_new[0,0] = 1
dig_samples_new[1,0] = 1

def fire_galvos(dev, rate, dig_las):
    #define channels
    channel1 = dev + '/' + 'ao0'  # laser trigger
    channel2 = dev + '/' + 'ao1'  # this is the auxiliary trigger signal
    channel3 = dev + '/line0'
    channel4 = dev + '/line1'
    #define clock
    sample_clock = '/' + dev + '/ao/SampleClock'
    with nidaqmx.Task() as analog_output, nidaqmx.Task() as digital_output:
        dig_las = np.uint32(dig_las)
        #add channels
        analog_output.ao_channels.add_ao_voltage_chan(channel1, 'mychannel1', -10, 10)
        analog_output.ao_channels.add_ao_voltage_chan(channel2, 'mychannel2', -10, 10)
        digital_output.do_channels.add_do_chan(channel3, 'mychannel3')
        digital_output.do_channels.add_do_chan(channel4, 'mychannel4')
        #digital_output.do_channels.add_do_chan(channel4, 'mychannel4', line_grouping=LineGrouping.CHAN_PER_LINE)
        #digital_output.ao_load_impedance = 50
        #define clock timings
        analog_output.timing.cfg_samp_clk_timing(rate=rate, sample_mode=nidaqmx.constants.AcquisitionType.FINITE, samps_per_chan=len(samples_new[0]))
        digital_output.timing.cfg_samp_clk_timing(rate=rate, source=sample_clock, sample_mode=nidaqmx.constants.AcquisitionType.FINITE, samps_per_chan=len(dig_samples_new[0]))
        #writing commands
        writer_ana = stream_writers.AnalogMultiChannelWriter(analog_output.out_stream, auto_start=False)
        writer_dig = stream_writers.DigitalMultiChannelWriter(digital_output.out_stream, auto_start=False)
        #writer_dig = stream_writers.DigitalSingleChannelWriter(digital_output.out_stream, auto_start=False)
        writer_ana.write_many_sample(samples_new)
        writer_dig.write_many_sample_port_uint32(dig_las)
        digital_output.start()
        analog_output.start()
        digital_output.wait_until_done(timeout=60)
        analog_output.wait_until_done(timeout=60)

fire_galvos(dev, 3000, dig_samples_new)
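Not a confirmed diagnosis, but two things worth checking against the code above: NI-DAQmx digital physical channels normally include the port (e.g. Dev1/port0/line0, not Dev1/line0), and on X-Series devices only port0 supports buffered, hardware-timed digital output. The commented-out CHAN_PER_LINE grouping is also the layout that matches the one-row-per-channel data DigitalMultiChannelWriter expects. A minimal standalone sketch along those lines, assuming the device is named Dev1:

# Sketch only (untested): two independent hardware-timed lines on port0,
# one channel per physical line.
import numpy as np
import nidaqmx
from nidaqmx import stream_writers
from nidaqmx.constants import AcquisitionType, LineGrouping

n = 1000
dig_data = np.zeros((2, n), dtype=np.uint32)  # one row per line/channel
dig_data[0, ::2] = 1   # example pattern on line0
dig_data[1, 1::2] = 1  # example pattern on line1

with nidaqmx.Task() as do_task:
    # CHAN_PER_LINE creates one channel per physical line, matching the
    # (n_channels, n_samples) layout of dig_data.
    do_task.do_channels.add_do_chan('Dev1/port0/line0:1',
                                    line_grouping=LineGrouping.CHAN_PER_LINE)
    # For a standalone test, use the DO task's own sample clock; in the
    # code above you would keep source='/Dev1/ao/SampleClock' and start
    # this task before the AO task, as the original code already does.
    do_task.timing.cfg_samp_clk_timing(rate=3000,
                                       sample_mode=AcquisitionType.FINITE,
                                       samps_per_chan=n)
    writer = stream_writers.DigitalMultiChannelWriter(do_task.out_stream,
                                                      auto_start=False)
    writer.write_many_sample_port_uint32(dig_data)
    do_task.start()
    do_task.wait_until_done(timeout=60)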

Related

Annotating images from h5 file

Hi all, I have 70k images saved in an .h5 file, and with this script I want to read them from that file and annotate the text instances into a .json file. When I run this script it takes a very long time to annotate one image (approx. 2 h).
When I do this with 15 images the script works fine and annotates all 15 images in a few seconds.
Now with 70k images the .h5 file is 51 GB.
I don't know whether the problem is in my code or whether the h5 file is simply too big. The code works fine with a small number of images, but I'm working on a project where I need 70k or 700k images.
from __future__ import division
import os
import os.path as osp
from re import U
import numpy as np
import matplotlib.pyplot as plt
import h5py
from common import *
import json
import cv2
import numpy as np
from itertools import cycle
import js2py
#from gen import brojac
#from synthgen import imnames

global x
global y

def write_json(data, filename='annotation.json'):
    with open(filename, 'w') as file:
        json.dump(data, file, indent=4)

DATA_PATH = 'results'
DB_FNAME = osp.join(DATA_PATH, 'SynthText.h5')

def get_data():
    return h5py.File(DB_FNAME, 'r')

def viz_textbb(text_im, imageName, charBB_list, wordBB, textToList, alpha=1.0):
    """
    text_im : image containing text
    charBB_list : list of 2x4xn_i bounding-box matrices
    wordBB : 2x4xm matrix of word coordinates
    """
    #print("k", z, type(z))
    plt.close(1)
    plt.figure(1)
    plt.imshow(text_im)
    H, W = text_im.shape[:2]
    global imnames
    #print("MOLIIIM", wordBB)
    # added from main()
    #**********************************************
    db = h5py.File('results/SynthText.h5', 'r')
    dsets = sorted(db['data'].keys())
    for k in dsets:
        db = get_data()
        imnames = sorted(db['data'].keys())

    start = 0
    count = 0
    coordinate = []
    coordinate1 = []
    name = []
    name1 = []
    final = []
    upperList = []
    downList = []
    counter = 0
    FinalFinal = []
    imageData = {}
    dictList = []
    for eachWord in textToList:
        length = len(eachWord)
        for i in range(0, 4):
            for j in range(start, length + start):
                coordinate.append([charBB_list[0][0][i][j], charBB_list[0][1][i][j]])
                coordinate1.append((charBB_list[0][0][i][j], charBB_list[0][1][i][j]))
            name.append(coordinate)
            name1.append(coordinate1)
            coordinate = []
        for j in range(0, length):
            for i in range(len(name)):
                #print(i, j, name[i][j])  # coordinates, to keep track; needed
                final.append(name[i][j])
                #print(name)
                # only used for drawing, not essential
                if i == 0 or i == 1:
                    upperList.append(name[i][j])
                if i == 2:
                    downList.append(name[i+1][j])
                if i == 3:
                    downList.append(name[i-1][j])
        down = reversed(downList)
        joinList = [*upperList, *down, upperList[0]]
        FinalFinal.append(joinList)
        imageData['transcription'] = eachWord
        imageData['language'] = "Latin"
        imageData['illegibility'] = False
        imageData['points'] = final
        dictionary_copy = imageData.copy()
        dictList.append(dictionary_copy)
        del dictionary_copy
        finalToList = np.array(final)
        name = []
        final = []
        upperList = []
        downList = []
        start = len(eachWord) + start
    #del(dictList[0])
    finalDict = {f'gt_{imageName}': dictList}
    #print(type(finalDict)) --> dict
    #print(imageName, finalDict)
    #print(finalDict)
    #print(len(textToList))
    #print(textToList)
    with open("annotation.json") as json_file:
        data = json.load(json_file)
        temp = data["annotations"]
        #temp.append(finalDict)
        temp.update(finalDict)
        #temp['annotations'] = finalDict
        write_json(data)
    json_file.close()
    for list in FinalFinal:
        x, y = zip(*list)
        plt.plot(x, y)
        #print(x, y)
        # points = tuple(zip(x, y))
        # # boundaries of the bounding box
        # left, right = min(points, key=lambda p: p[0]), max(points, key=lambda p: p[0])
        # bottom, top = min(points, key=lambda p: p[1]), max(points, key=lambda p: p[1])
        # # area
        # base = right[0] - left[0]
        # height = top[1] - bottom[1]
        # A = base * height
        #print(A)
    for i in range(len(charBB_list)):
        #print(charBB_list)  # prints the x values for one vertex of all letter
        # instances, then the 2nd, 3rd and 4th, and then the y values separately
        bbs = charBB_list[i]
        ni = bbs.shape[-1]
        for j in range(ni):
            bb = bbs[:,:,j]
            bb = np.c_[bb, bb[:,0]]  # appending bb[:,0] closes the polygon at the top-left corner
            #plt.plot(bb[0,:], bb[1,:], 'r', alpha=alpha)
    # plot the word-BB:
    for i in range(wordBB.shape[-1]):
        bb = wordBB[:,:,i]  # coordinates of the wordBB
        bb = np.c_[bb, bb[:,0]]  # joins back to the leftmost point; needed
        #plt.plot(bb[0,:], bb[1,:], 'g', alpha=alpha)
        # visualize the indiv vertices:
        vcol = ['r','g','b','k']
        #for j in range(4):
        #    plt.scatter(bb[0,j], bb[1,j], color=vcol[j])
        #print(bb)  # ----> coordinates of the wordBB
        #print(bb[1,j])
    plt.gca().set_xlim([0, W-1])
    plt.gca().set_ylim([H-1, 0])
    plt.show(block=False)

def main(db_fname):
    db = h5py.File(db_fname, 'r')
    dsets = sorted(db['data'].keys())
    print("total number of images : ", colorize(Color.RED, len(dsets), highlight=True))
    for k in dsets:
        rgb = db['data'][k][...]
        charBB = db['data'][k].attrs['charBB']
        wordBB = db['data'][k].attrs['wordBB']
        txt = db['data'][k].attrs['txt']
        textToList = (db['data'][k].attrs['txt']).tolist()
        #print(textToList)
        viz_textbb(rgb, k, [charBB], wordBB, textToList)
        print("image name       : ", colorize(Color.RED, k, bold=True))
        print(" ** no. of chars : ", colorize(Color.YELLOW, charBB.shape[-1]))
        print(" ** no. of words : ", colorize(Color.YELLOW, wordBB.shape[-1]))
        print(" ** text         : ", colorize(Color.GREEN, txt))
        #print("To know", z[1], type(z[1]))
        # uncomment the lines below to step through images with Enter
        # if 'q' in input("next? ('q' to exit) : "):
        #     break
    db.close()

if __name__ == '__main__':
    main('results/SynthText.h5')
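A structural observation rather than a definitive fix: viz_textbb re-opens the .h5 database once per key inside its own loop, and it re-reads and re-writes the entire (ever-growing) annotation.json for every image, so the per-image cost grows with the number of images already processed. A sketch of the alternative shape, where the database is opened once and the JSON is written once at the end; extract_points() is a hypothetical stand-in for the per-word point extraction done in viz_textbb:

# Sketch only: one database handle, annotations kept in memory,
# annotation.json written a single time.
import json
import h5py

def annotate_all(db_fname, out_fname='annotation.json'):
    data = {"annotations": {}}
    with h5py.File(db_fname, 'r') as db:
        for k in sorted(db['data'].keys()):
            textToList = db['data'][k].attrs['txt'].tolist()
            charBB = db['data'][k].attrs['charBB']
            dictList = extract_points(charBB, textToList)  # hypothetical helper
            data["annotations"][f'gt_{k}'] = dictList
    with open(out_fname, 'w') as f:
        json.dump(data, f, indent=4)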

Continuously updating a pandas data-frame

Please see the pandas-based pattern scanner below; I am using a CSV file as the data source and loading it into data.
Since the data is loaded from a CSV file, I have to reload/rerun the script every 5 minutes to read the updated file, which means the plot is regenerated every 5 minutes.
Is there any way to use df.update to avoid reloading the script and prevent the plot from being reloaded again and again?
import pandas as pd
import numpy as np
from scipy.signal import argrelextrema
import matplotlib.pyplot as plt
from harmonic_functions import *
import uuid
from csv import DictReader

data = pd.read_csv('temp.csv')
data.time = pd.to_datetime(data.time, format='%d.%m.%Y %H:%M:%S.%f')
data.index = data['time']
# data = data.drop_duplicates(keep=False)
price = data.close.copy()
err_allowed = 10.0/100

pnl = []
trade_dates = []
correct_pats = 0
pats = 0
# plt.ion()
for i in range(100, len(price)):
    current_idx, current_pat, start, end = peak_detect(price.values[:i], order=7)
    XA = current_pat[1] - current_pat[0]
    AB = current_pat[2] - current_pat[1]
    BC = current_pat[3] - current_pat[2]
    CD = current_pat[4] - current_pat[3]
    moves = [XA, AB, BC, CD]
    gart = is_gartley(moves, err_allowed)
    butt = is_butterfly(moves, err_allowed)
    bat = is_bat(moves, err_allowed)
    crab = is_crab(moves, err_allowed)
    shark = is_shark(moves, err_allowed)
    trio = is_trio(moves, err_allowed)
    cyph = is_cyph(moves, err_allowed)
    three_dives = is_3dives(moves, err_allowed)
    fivezero = is_50(moves, err_allowed)
    altbat = is_altbat(moves, err_allowed)
    deepcrab = is_deepcrab(moves, err_allowed)
    dragon = is_dragon(moves, err_allowed)
    snorm = is_snorm(moves, err_allowed)
    harmonics = np.array([gart, butt, bat, crab, shark, trio, cyph, three_dives, fivezero, altbat, deepcrab, dragon, snorm])
    labels = ['Garterly', 'Butterfly', 'Bat', 'Crab', 'Shark', 'Trio', 'Cypher', '3Dives', '5Zero', 'AltBat', 'DeepCrab', 'Dragon', 'Snorm']
    if np.any(harmonics == 1) or np.any(harmonics == -1):
        for j in range(0, len(harmonics)):
            if harmonics[j] == 1 or harmonics[j] == -1:
                pats += 1
                sense = 'Bearish ' if harmonics[j] == -1 else 'Bullish '
                label = sense + labels[j] + ' found'
                print(label)
                print(price.values[start])
                plt.title(label)
                plt.plot(np.arange(start, i+5), price.values[start:i+5])
                plt.scatter(current_idx, current_pat, c='r')
                filename = str(uuid.uuid1())[:8]
                print(current_pat)
                print(current_idx)
                # with open('temp.csv', mode='r') as csv_file:
                #     file = DictReader(csv_file, delimiter=',')
                #     close = str(current_pat[4])
                #     print(current_pat)
                #     rows = [row for row in file if row['close'] in close]
                #     closetime = rows[-1]['ID']
                #     print(closetime)
                write1 = str(current_idx)
                write2 = str(current_pat)
                write = write1 + ',' + write2
                print(write)
                with open("datadb", "r+") as file:
                    for line in file:
                        if write in line:
                            break
                    else:  # not found, we are at the eof
                        file.write(f"{write}\n")  # append missing data
                print(filename)
                plt.savefig(filename)
                plt.close(filename)
                # plt.show()
                plt.clf()
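Not a confirmed solution, but one common shape for this problem: instead of df.update, poll the CSV on a timer, append only the rows newer than what is already in data, and redraw on a single interactive figure (the plt.ion() call that is commented out above). A sketch, assuming the scanner body above is wrapped in a hypothetical scan(data) function:

# Sketch only: re-read temp.csv every 5 minutes and append new rows.
import time
import pandas as pd
import matplotlib.pyplot as plt

plt.ion()                          # interactive mode: redraw without blocking
data = pd.read_csv('temp.csv')
data.time = pd.to_datetime(data.time, format='%d.%m.%Y %H:%M:%S.%f')
data.index = data['time']

while True:
    latest = pd.read_csv('temp.csv')
    latest.time = pd.to_datetime(latest.time, format='%d.%m.%Y %H:%M:%S.%f')
    latest.index = latest['time']
    new_rows = latest[latest.index > data.index[-1]]
    if not new_rows.empty:
        data = pd.concat([data, new_rows])
        # scan(data)               # hypothetical: rerun the pattern scan
        plt.pause(0.1)             # let the figure refresh
    time.sleep(300)                # wait 5 minutes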

APIErrorException: (BadArgument) 'recognitionModel' is incompatible (Azure Cognitive Face)

I'm creating an attendance system using the Azure Cognitive Face API and storing the attendance in an Excel sheet. But an error occurs: "'recognitionModel' is incompatible." From the documentation I have come to know that there are two recognition models (recognition_01, recognition_02). Is it required to mention the type? If so, how do I do it in Python?
ERROR:
File "identify.py", line 58, in <module>
res = face_client.face.identify(faceIds, global_var.personGroupId)
File "C:\Python\Python36\lib\site-packages\azure\cognitiveservices\vision\face\operations\_face_operations.py", line 313, in identify
raise models.APIErrorException(self._deserialize, response)
azure.cognitiveservices.vision.face.models._models_py3.APIErrorException: (BadArgument) 'recognitionModel' is incompatible.
CODE:
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.face import FaceClient  # needed for FaceClient below
from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person, SnapshotObjectType, OperationStatusType
import global_variables as global_var
import os, urllib
import sqlite3
from openpyxl import Workbook, load_workbook
from openpyxl.utils import get_column_letter, column_index_from_string
from openpyxl.cell import Cell
import time
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning

requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

#get current date
currentDate = time.strftime("%d_%m_%y")
wb = load_workbook(filename="reports.xlsx")
sheet = wb['Cse16']

def getDateColumn():
    for i in range(1, len(list(sheet.rows)[0]) + 1):
        col = get_column_letter(i)
        if sheet['%s%s' % (col, '1')].value == currentDate:
            return col

Key = global_var.key
ENDPOINT = 'https://centralindia.api.cognitive.microsoft.com'
face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(Key))
connect = sqlite3.connect("Face-DataBase")
attend = [0 for i in range(60)]
currentDir = os.path.dirname(os.path.abspath(__file__))
directory = os.path.join(currentDir, 'Cropped_faces')

for filename in os.listdir(directory):
    if filename.endswith(".jpg"):
        print(filename)
        img_data = open(os.path.join(directory, filename), 'r+b')
        res = face_client.face.detect_with_stream(img_data)
        print("Res = {}".format(res))
        if len(res) < 1:
            print("No face detected.")
            continue
        faceIds = []
        for face in res:
            faceIds.append(face.face_id)
        res = face_client.face.identify(faceIds, global_var.personGroupId)  # error occurs on this line
        #print(filename)
        print("res = {}".format(res))
        for face in res:
            if not face['candidates']:
                print("Unknown")
            else:
                personId = face['candidates'][0]['personId']
                print("personid = {}".format(personId))
                #cmd = + personId
                cur = connect.execute("SELECT * FROM Students WHERE personID = (?)", (personId,))
                #print("cur = {}".format(cur))
                for row in cur:
                    print("aya")
                    print("row = {}".format(row))
                    attend[int(row[0])] += 1
                    print("---------- " + row[1] + " recognized ----------")
        time.sleep(6)

for row in range(2, len(list(sheet.columns)[0]) + 1):
    rn = sheet.cell(row=row, column=1).value
    if rn is not None:
        print("rn = {}".format(rn))
        rn = rn[-2:]
        if attend[int(rn)] != 0:
            col = getDateColumn()
            print("col = {}".format(col))
            sheet['%s%s' % (col, str(row))] = 0

wb.save(filename="reports.xlsx")
As mentioned in the services documentation portal (for example here for West Europe, but it is the same for all regions) for the Identify operation:
The 'recognitionModel' associated with the query faces' faceIds should be the same as the 'recognitionModel' used by the target person group or large person group.
So it looks like you have a mismatch here. You don't have to pass the recognitionModel to the Identify operation, but to the Detect operation that you are doing first.
And you must ensure that this value is the same as the one used for the personGroup in which you want to identify the person (see the personGroup create operation, which takes the recognitionModel parameter).
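If re-creating the person group is an option, a sketch of pinning both sides to the same model; newer versions of the azure-cognitiveservices-vision-face SDK expose a recognition_model parameter on both calls, and the group name 'students' here is made up:

# Sketch: use the same recognition model for the group and for detection.
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials

face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(Key))

# Create (or re-create) the person group with an explicit model...
face_client.person_group.create(
    person_group_id=global_var.personGroupId,
    name='students',                      # hypothetical display name
    recognition_model='recognition_02')

# ...and detect with the same model before calling identify.
res = face_client.face.detect_with_stream(
    img_data, recognition_model='recognition_02')

Note that re-creating the group means re-adding the persons and re-training it before Identify will work again.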

Saving 3D+Time (XYCZT) stacks with Bio-Formats on Python

I'm having some trouble saving 3D+Time TIFF files from NumPy arrays in Python, using the Bio-Formats standard.
For a start, I have a numpy array with 5 dimensions, ordered XYCZT; in my case (267, 518, 1, 331, 3).
Here is the function I have so far:
# Write file to disk
def write(img_XYCZT, path, type='uint16', verbose=True):
    import bioformats.omexml as ome
    import javabridge as jutil
    import bioformats
    import numpy as np
    import os
    import sys

    if verbose:
        print('Dimensions (XYCZT): ' + str(np.shape(img_XYCZT)))
        sys.stdout.flush()

    # Get the new dimensions
    SizeX = np.shape(img_XYCZT)[0]
    SizeY = np.shape(img_XYCZT)[1]
    SizeC = np.shape(img_XYCZT)[2]
    SizeZ = np.shape(img_XYCZT)[3]
    SizeT = np.shape(img_XYCZT)[4]

    # Start JVM for bioformats
    jutil.start_vm(class_path=bioformats.JARS)

    # Getting metadata info
    omexml = ome.OMEXML()
    omexml.image(0).Name = os.path.split(path)[1]
    p = omexml.image(0).Pixels
    assert isinstance(p, ome.OMEXML.Pixels)
    p.SizeX = SizeX
    p.SizeY = SizeY
    p.SizeC = SizeC
    p.SizeT = SizeT
    p.SizeZ = SizeZ
    p.DimensionOrder = ome.DO_XYCZT
    p.PixelType = type
    p.channel_count = SizeC
    p.plane_count = SizeZ
    p.Channel(0).SamplesPerPixel = SizeC
    omexml.structured_annotations.add_original_metadata(ome.OM_SAMPLES_PER_PIXEL, str(SizeC))

    # Converting to omexml
    xml = omexml.to_xml()

    # Write file using Bioformats
    if verbose:
        print('Writing frames:'),
        sys.stdout.flush()
    for frame in range(SizeT):
        if verbose:
            print('[' + str(frame + 1) + ']'),
            sys.stdout.flush()
        index = frame
        pixel_buffer = bioformats.formatwriter.convert_pixels_to_buffer(img_XYCZT[:, :, :, :, frame], type)
        script = """
        importClass(Packages.loci.formats.services.OMEXMLService,
                    Packages.loci.common.services.ServiceFactory,
                    Packages.loci.formats.out.TiffWriter);

        var service = new ServiceFactory().getInstance(OMEXMLService);
        var metadata = service.createOMEXMLMetadata(xml);
        var writer = new TiffWriter();
        writer.setBigTiff(true);
        writer.setMetadataRetrieve(metadata);
        writer.setId(path);
        writer.setInterleaved(true);
        writer.saveBytes(index, buffer);
        writer.close();
        """
        jutil.run_script(script, dict(path=path, xml=xml, index=index, buffer=pixel_buffer))
    if verbose:
        print('[Done]')
        sys.stdout.flush()

    if verbose:
        print('File saved on ' + str(path))
        sys.stdout.flush()
Here img_XYCZT is the numpy array, and path is the place to save the file. The function probably has lots of redundancy in the metadata, but that's me fighting to get it to work somehow...
Checking the saved file in Fiji, the Z information shows up as C channels:
The file simply doesn't have the Z dimension... I've been struggling with this for some time, so any help is highly appreciated!
Thanks
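One thing that stands out (a guess, not a verified fix): the loop above writes one buffer per time point, while a Bio-Formats TiffWriter stores one 2D plane per saveBytes index. With DimensionOrder XYCZT, the plane index for (c, z, t) is c + z*SizeC + t*SizeC*SizeZ, and plane_count should cover all planes, not just SizeZ. An untested sketch of the write loop along those lines (plane_count set before omexml.to_xml(), reusing script, xml, path and jutil from the function above):

# Untested sketch: one saveBytes call per 2D plane, plane index derived
# from the declared XYCZT dimension order.
p.plane_count = SizeZ * SizeC * SizeT  # one Plane entry per 2D plane

for t in range(SizeT):
    for z in range(SizeZ):
        for c in range(SizeC):
            index = c + z * SizeC + t * SizeC * SizeZ  # XYCZT plane index
            plane = img_XYCZT[:, :, c, z, t]
            buf = bioformats.formatwriter.convert_pixels_to_buffer(plane, type)
            jutil.run_script(script, dict(path=path, xml=xml,
                                          index=index, buffer=buf))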

Getting the memory layout out of an (avr)elf file by using python + pyElftools

I am creating my own bootloader for an ATXmega128A4U. To use the bootloader I want to transform the ELF file of the firmware into a memory map to be used in the ATXmega.
For that I use Python and the module "pyelftools". Its documentation is poor, and so I ran into a problem: I do not know what information I can use to get the address, offset, etc. from the data in the sections.
My goal is to create a bytearray, copy the data/code into it, and transfer it to the bootloader. Below is my code:
import sys

# If pyelftools is not installed, the example can also run from the root or
# examples/ dir of the source distribution.
sys.path[0:0] = ['.', '..']

from elftools.common.py3compat import bytes2str
from elftools.elf.elffile import ELFFile

# 128k flash for the ATXmega128a4u
flashsize = 128 * 1024

def process_file(filename):
    with open(filename, 'rb') as f:
        # get the data
        elffile = ELFFile(f)
        dataSec = elffile.get_section_by_name(b'.data')
        textSec = elffile.get_section_by_name(b'.text')

        # prepare the memory
        flashMemory = bytearray(flashsize)

        # the data section
        startAddr = dataSec.header.sh_offset
        am = dataSec.header.sh_size
        i = 0
        while i < am:
            val = dataSec.stream.read(1)
            flashMemory[startAddr] = val[0]
            startAddr += 1
            i += 1

        # the text section
        startAddr = textSec.header.sh_offset
        am = textSec.header.sh_size
        i = 0
        while i < am:
            print(str(startAddr) + ' : ' + str(i))
            val = textSec.stream.read(1)
            flashMemory[startAddr] = val[0]
            startAddr += 1
            i += 1
    print('finished')

if __name__ == '__main__':
    process_file('firmware.elf')
Hope someone can tell me how to solve this problem.
I managed to solve the problem.
Don't read the data manually from the stream via "textSec.stream.read"; use "textSec.data()" instead. Internally (see "sections.py") a seek operation is done on the file before the data is read, so the result is the valid data chunk.
The following code reads the code (text) section of an ATXmega firmware and copies it into a bytearray that has the layout of the flash of an ATXmega128A4U device.
@vlas_tepesch: the hex conversion is not needed, and the 64k pitfall is avoided.
import sys

sys.path[0:0] = ['.', '..']

from elftools.common.py3compat import bytes2str
from elftools.elf.elffile import ELFFile

# 128k flash for the ATXmega128a4u
flashsize = 128 * 1024

def __printSectionInfo(s):
    print('[{nr}] {name} {type} {addr} {offs} {size}'.format(
        nr=s.header['sh_name'],
        name=s.name,
        type=s.header['sh_type'],
        addr=s.header['sh_addr'],
        offs=s.header['sh_offset'],
        size=s.header['sh_size']
        )
    )

def process_file(filename):
    print('In file: ' + filename)
    with open(filename, 'rb') as f:
        # get the data
        elffile = ELFFile(f)
        print('sections:')
        for s in elffile.iter_sections():
            __printSectionInfo(s)
        print('get the code from the .text section')
        textSec = elffile.get_section_by_name(b'.text')

        # prepare the memory
        flashMemory = bytearray(flashsize)

        # the text section
        startAddr = textSec.header['sh_addr']
        val = textSec.data()
        flashMemory[startAddr:startAddr+len(val)] = val

        # print memory
    print('finished')

if __name__ == '__main__':
    process_file('firmware.elf')
Thanks for the comments!
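A follow-up note on addresses (an assumption to verify, not part of the original answer): sh_addr for .text happens to equal the flash address on AVR, but .data has a RAM virtual address and a separate load address in flash. Iterating over the program headers and using p_paddr is one way to place every loadable chunk; a sketch with pyelftools:

# Sketch: build the flash image from PT_LOAD segments, using the load
# (physical) address p_paddr rather than the virtual address.
from elftools.elf.elffile import ELFFile

def build_flash_image(filename, flashsize=128 * 1024):
    image = bytearray(flashsize)
    with open(filename, 'rb') as f:
        elffile = ELFFile(f)
        for seg in elffile.iter_segments():
            if seg.header['p_type'] != 'PT_LOAD':
                continue
            lma = seg.header['p_paddr']   # load address (flash)
            data = seg.data()             # file contents of the segment
            image[lma:lma + len(data)] = data
    return image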
