I'm trying to use this code in Webots for one of the Universal Robots arms. The code works well until it tries to compute "ikResults" on line 49. The call goes through ikpy into least_squares.py, and I get the error "'x0' is infeasible". This is the code I'm using:
import sys
import tempfile
try:
    import ikpy
    from ikpy.chain import Chain
except ImportError:
    sys.exit('The "ikpy" Python module is not installed, run "pip install ikpy" first.')
import math
from controller import Supervisor

IKPY_MAX_ITERATIONS = 4

supervisor = Supervisor()
timeStep = int(4 * supervisor.getBasicTimeStep())

# Write the robot's URDF to a temporary file and build the IK chain from it.
filename = None
with tempfile.NamedTemporaryFile(suffix='.urdf', delete=False) as file:
    filename = file.name
    file.write(supervisor.getUrdf().encode('utf-8'))
armChain = Chain.from_urdf_file(filename, active_links_mask=[False, True, True, True, True,
                                                             True, True, False, True, True, True])

# Grab the motors of the active joints and enable their position sensors.
motors = []
for link in armChain.links:
    if 'joint' in link.name and link.name != "wrist_3_link_gripper_joint":
        motor = supervisor.getDevice(link.name)
        motor.setVelocity(1.0)
        position_sensor = motor.getPositionSensor()
        position_sensor.enable(timeStep)
        motors.append(motor)

target = supervisor.getFromDef('TARGET')
arm = supervisor.getSelf()

while supervisor.step(timeStep) != -1:
    # Target position expressed relative to the arm's base.
    targetPosition = target.getPosition()
    armPosition = arm.getPosition()
    x = targetPosition[0] - armPosition[0]
    y = targetPosition[1] - armPosition[1]
    z = targetPosition[2] - armPosition[2]

    initial_position = [0] + [m.getPositionSensor().getValue() for m in motors] + [0]
    ikResults = armChain.inverse_kinematics([x, y, z], max_iter=IKPY_MAX_ITERATIONS,
                                            initial_position=initial_position)
I've tried increasing the number of iterations, changing the target's position, and toggling the active/inactive status of the links in armChain (True or False), but nothing solved the issue. Reading similar threads, it seems to have something to do with the joint bounds, but I'm not sure how to check that.
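Since scipy's least_squares raises "'x0' is infeasible" when the initial guess lies outside the variable bounds, one thing worth trying is printing each link's bounds next to the corresponding entry of initial_position, and clamping the guess into range. This is only a rough diagnostic sketch based on that assumption, using ikpy's link.bounds attribute:

# Print each joint's bounds next to the initial guess: any value outside its
# interval will make scipy's least_squares reject x0 as infeasible.
for link, q in zip(armChain.links, initial_position):
    print(link.name, link.bounds, q)

# Clamp the initial guess into the bounds before calling inverse_kinematics.
clamped_position = []
for link, q in zip(armChain.links, initial_position):
    low, high = link.bounds
    if low is not None and high is not None:
        q = min(max(q, low), high)
    clamped_position.append(q)

ikResults = armChain.inverse_kinematics([x, y, z], max_iter=IKPY_MAX_ITERATIONS,
                                        initial_position=clamped_position)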
I am studying the Maya Python API.
I'm making a simple mult_matrix node plugin, and for some reason the multiplied output keeps coming out as zero. The output is supposed to be a matrix.
Can you help me?
def compute(self, plug, data):
    # Multiply the two input matrices and write the result to the output plug.
    if plug == node.Moutput:
        readerMtx = data.inputValue(node.reader).asFloatMatrix()
        targetMtx = data.inputValue(node.target).asFloatMatrix()
        result = readerMtx * targetMtx
        outputHandle = data.outputValue(node.Moutput)
        outputHandle.setMFloatMatrix(result)
        data.setClean(plug)
def initializer():
    mtxAttr1 = OpenMaya.MFnMatrixAttribute()
    node.reader = mtxAttr1.create("In_Matrix", "reader", OpenMaya.MFnMatrixData.kMatrix)
    mtxAttr1.setKeyable = True
    mtxAttr1.setStorable = True
    mtxAttr1.Readable = True
    mtxAttr1.setWrit = True

    mtxAttr2 = OpenMaya.MFnMatrixAttribute()
    node.Target = mtxAttr2.create("Target_Matrix", "reader", OpenMaya.MFnMatrixData.kMatrix)
    mtxAttr2.setKeyable = True
    mtxAttr2.setStorable = True
    mtxAttr2.Readable = True
    mtxAttr2.setWrit = True

    mtxAttr3 = OpenMaya.MFnMatrixAttribute()
    node.MOutput = mtxAttr3.create("In_Matrix", "Moutput", OpenMaya.MFnMatrixData.kMatrix)
    mtxAttr3.writable = True
    mtxAttr3.keyable = True

    node.addAttribute(node.reader)
    node.addAttribute(node.Target)
    node.addAttribute(node.MOutput)
    node.AttributeAffects(node.reader, node.MOutput)
    node.AttributeAffects(node.Target, node.MOutput)
Thanks a lot for any help!
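For comparison, here is a rough, untested sketch of how the attribute setup part is usually written with maya.OpenMaya, using a made-up MultMatrixNode class name in place of node: the keyable/storable/writable flags are set by calling the setter methods, every attribute needs its own unique long and short name, and addAttribute/attributeAffects start with a lowercase letter. Also note that compute() above compares plug against node.Moutput while the initializer creates node.MOutput; Python treats those as two different attributes.

import maya.OpenMaya as OpenMaya

def initializer():
    mtxAttr1 = OpenMaya.MFnMatrixAttribute()
    # Default (double) matrix type; the flag setters are method calls.
    MultMatrixNode.reader = mtxAttr1.create("readerMatrix", "rdm")
    mtxAttr1.setKeyable(True)
    mtxAttr1.setStorable(True)

    mtxAttr2 = OpenMaya.MFnMatrixAttribute()
    MultMatrixNode.target = mtxAttr2.create("targetMatrix", "tgm")   # short name must differ from "rdm"
    mtxAttr2.setKeyable(True)
    mtxAttr2.setStorable(True)

    mtxAttr3 = OpenMaya.MFnMatrixAttribute()
    MultMatrixNode.Moutput = mtxAttr3.create("outMatrix", "omx")     # output: computed, not user-written
    mtxAttr3.setWritable(False)
    mtxAttr3.setStorable(False)

    MultMatrixNode.addAttribute(MultMatrixNode.reader)
    MultMatrixNode.addAttribute(MultMatrixNode.target)
    MultMatrixNode.addAttribute(MultMatrixNode.Moutput)
    # Note the lowercase "a": attributeAffects, not AttributeAffects.
    MultMatrixNode.attributeAffects(MultMatrixNode.reader, MultMatrixNode.Moutput)
    MultMatrixNode.attributeAffects(MultMatrixNode.target, MultMatrixNode.Moutput)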
I am trying to use Stockfish to evaluate a chess position from FEN notation, all in Python. I am mainly using two libraries (pgnToFen, which I found on GitHub here: https://github.com/SindreSvendby/pgnToFen, and the MIT-licensed Stockfish wrapper here: https://github.com/zhelyabuzhsky/stockfish). After many bugs I have reached problem after problem. Stockfish not only can't analyse this FEN position (3b2k1/1p3pp1/8/3pP1P1/pP3P2/P2pB3/6K1/8 b f3 -), it loops on it forever! "No worries," I thought, changing the source code should be doable. I changed _put(), but I am basically unable to feed it dummy values because stdin.flush() won't execute once I give it those values, which means I can't even skip to the next row in my dataframe. :( The code I changed is below.
def _put(self, command: str, tmp_time) -> None:
    if not self.stockfish.stdin:
        raise BrokenPipeError()
    self.stockfish.stdin.write(f"{command}\n")
    try:
        self.stockfish.stdin.flush()
    except:
        if command != "quit":
            self.stockfish.stdin.write('isready\n')
            try:
                time.sleep(tmp_time)
                self.stockfish.stdin.flush()
            except:
                #print ('Imma head out', file=sys.stderr)
                raise ValueError('Imma head out...')
                #sys.stderr.close()

def get_evaluation(self) -> dict:
    """Evaluates current position

    Returns:
        A dictionary of the current advantage with "type" as "cp" (centipawns) or "mate" (checkmate in)
    """
    evaluation = dict()
    fen_position = self.get_fen_position()
    if "w" in fen_position:  # w can only be in FEN if it is whites move
        compare = 1
    else:  # stockfish shows advantage relative to current player, convention is to do white positive
        compare = -1
    self._put(f"position {fen_position}", 5)
    self._go()
    x = 0
    while True:
        x = x + 1
        text = self._read_line()
        #print(text)
        splitted_text = text.split(" ")
        if splitted_text[0] == "info":
            for n in range(len(splitted_text)):
                if splitted_text[n] == "score":
                    evaluation = {
                        "type": splitted_text[n + 1],
                        "value": int(splitted_text[n + 2]) * compare,
                    }
        elif splitted_text[0] == "bestmove":
            return evaluation
        elif x == 500:
            evaluation = {
                "type": 'cp',
                "value": 10000,
            }
            return evaluation
And last but not least, the change to the __init__ constructor is below:
self._stockfish_major_version: float = float(self._read_line().split(" ")[1])
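If the point of that edit was just to stop the constructor from choking on a version string that is not a plain integer, a slightly more defensive variant (my own sketch, not the library's code) would be:

# Sketch only: parse the engine banner defensively instead of assuming the
# second token (e.g. "Stockfish 12" or "Stockfish 14.1") is always numeric.
version_token = self._read_line().split(" ")[1]
try:
    self._stockfish_major_version = float(version_token)
except ValueError:
    self._stockfish_major_version = 0.0   # unknown / development build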
And the code where I import this module is below; this is where the errors pop up.
import pandas as pd
import re
import nltk
import numpy as np
from stockfish import Stockfish
import os
import sys
sys.path.insert(0, r'C:\Users\path\to\pgntofen')
import pgntofen

#nltk.download('punkt')

#Changed models.py for major version line 39 in stockfish from int to float
stockfish = Stockfish(r"C:\Users\path\to\Stockfish.exe")

file = r'C:\Users\path\to\selenium-pandas output.csv'
chunksize = 10 ** 6
for chunk in pd.read_csv(file, chunksize=chunksize):
    for index, row in chunk.iterrows():
        FullMovesStr = str(row['FullMoves'])
        FullMovesStr = FullMovesStr.replace('+', '')
        if "e.p" in FullMovesStr:
            row.to_csv(r'C:\Users\MyName\Logger.csv', header=None, index=False, mode='a')
            print('Enpassant')
            continue
        tokens = nltk.word_tokenize(FullMovesStr)
        movelist = []
        for tokenit in range(len(tokens)):
            if "." in str(tokens[tokenit]):
                try:
                    tokenstripped = re.sub(r"[0-9]+\.", "", tokens[tokenit])
                    token = [tokenstripped, tokens[tokenit+1]]
                    movelist.append(token)
                except:
                    continue
            else:
                continue
        DFMoves = pd.DataFrame(movelist, columns=[['WhiteMove', 'BlackMove']])
        DFMoves['index'] = row['index']
        DFMoves['Date'] = row['Date']
        DFMoves['White'] = row['White']
        DFMoves['Black'] = row['Black']
        DFMoves['W ELO'] = row['W ELO']
        DFMoves['B ELO'] = row['B ELO']
        DFMoves['Av ELO'] = row['Av ELO']
        DFMoves['Event'] = row['Event']
        DFMoves['Site'] = row['Site']
        DFMoves['ECO'] = row['ECO']
        DFMoves['Opening'] = row['Opening']
        pd.set_option('display.max_rows', DFMoves.shape[0]+1)
        print(DFMoves[['WhiteMove', 'BlackMove']])
        seqmoves = []
        #seqmovesBlack = []
        evalmove = []
        pgnConverter = pgntofen.PgnToFen()
        #stockfish.set_fen_position("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
        #rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1
        for index, row in DFMoves.iterrows():
            try:
                stockfish.set_fen_position("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
            except:
                evalmove.append("?")
                continue
            #stockfish.set_fen_position("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
            pgnConverter.resetBoard()
            WhiteMove = str(row['WhiteMove'])
            BlackMove = str(row['BlackMove'])
            if index == 0:
                PGNMoves1 = [WhiteMove]
                seqmoves.append(WhiteMove)
                #seqmoves.append(BlackMove)
            else:
                seqmoves.append(WhiteMove)
                #seqmoves.append(BlackMove)
                PGNMoves1 = seqmoves.copy()
            #print(seqmoves)
            try:
                pgnConverter.pgnToFen(PGNMoves1)
                fen = pgnConverter.getFullFen()
            except:
                break
            try:
                stockfish.set_fen_position(fen)
                print(stockfish.get_board_visual())
                evalpos = stockfish.get_evaluation()
                evalmove.append(evalpos)
            except:
                pass
            try:
                stockfish.set_fen_position("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
            except:
                evalmove.append("?")
                continue
            pgnConverter.resetBoard()
            if index == 0:
                PGNMoves2 = [WhiteMove, BlackMove]
                seqmoves.append(BlackMove)
            else:
                seqmoves.append(BlackMove)
                PGNMoves2 = seqmoves.copy()
            try:
                pgnConverter.pgnToFen(PGNMoves2)
                fen = pgnConverter.getFullFen()
            except:
                break
            try:
                stockfish.set_fen_position(fen)
                print(stockfish.get_board_visual())
                evalpos = stockfish.get_evaluation()
                print(evalpos)
                evalmove.append(evalpos)
            except:
                pass
        #DFMoves['EvalWhite'] = evalwhite
        #DFMoves['EvalBlack'] = evalblack
        print(evalmove)
So the detailed question is: how do I get stockfish.get_evaluation() to just skip, or better yet how do I fix the problem, for this FEN position (3b2k1/1p3pp1/8/3pP1P1/pP3P2/P2pB3/6K1/8 b f3 -)? I have been working on this problem for quite a while, so any insight would be very much appreciated.
My specs are Windows 10, Python 3.9, an Intel(R) Core(TM) i9-10980XE CPU @ 3.00GHz, and 64.0 GB of RAM.
Thanks :)
OK. It seems your FEN is invalid (3b2k1/1p3pp1/8/3pP1P1/pP3P2/P2pB3/6K1/8 b f3 -), so check that first. Also, the python-chess library (https://python-chess.readthedocs.io/en/latest/index.html) lets you work with FEN AND chess engines. Pretty cool, no? Here is an example of these two fantastic tools:
import chess
import chess.engine
import chess.pgn

pgn = open("your_pgn_file.pgn")
game = chess.pgn.read_game(pgn)
engine = chess.engine.SimpleEngine.popen_uci("your_stockfish_path.exe")

# Iterate through all moves, play them on a board and analyse them.
board = game.board()
for move in game.mainline_moves():
    board.push(move)
    print(engine.analyse(board, chess.engine.Limit(time=0.1))["score"])
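And since the immediate problem is a broken FEN, python-chess can also validate each FEN before it is handed to Stockfish, so bad rows can simply be skipped. A small sketch (chess.Board raises ValueError on a FEN it cannot parse):

import chess

fen = "3b2k1/1p3pp1/8/3pP1P1/pP3P2/P2pB3/6K1/8 b f3 -"
try:
    board = chess.Board(fen)
except ValueError as err:
    # Invalid FEN: record a placeholder and move on instead of calling Stockfish.
    print(f"Skipping invalid FEN ({err}): {fen}")
else:
    print("FEN parsed, board status:", board.status())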
I'm trying to make a simple button that will set global animation preferences in Maya.
The script is supposed to change the animation preferences for DEFAULT IN TANGENT, DEFAULT OUT TANGENT and DEFAULT TANGENT WEIGHT.
So far this is what I've got:
import pymel.core as pm

def setBlockingPrefs():
    pm.keyTangent(edit=True, g=True, itt='clamped')
    pm.keyTangent(edit=True, g=True, ott='step')
    pm.keyTangent(edit=True, g=True, weightedTangents=1)

def setSpliningPrefs():
    pm.keyTangent(edit=True, g=True, itt='auto')
    pm.keyTangent(edit=True, g=True, ott='auto')
    pm.keyTangent(edit=True, g=True, weightedTangents=1)
I get no error with this code, but nothing happens at all; the preferences are not changed.
What am I missing?
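One quick way to check whether those calls had any effect is to query the same global settings back after running the function; a small sketch using the same command in query mode (the exact return format may vary between Maya versions):

import pymel.core as pm

setBlockingPrefs()
# Query the global (default) tangent settings to verify the edit took effect.
print(pm.keyTangent(query=True, g=True, itt=True))               # should report 'clamped'
print(pm.keyTangent(query=True, g=True, ott=True))               # should report 'step'
print(pm.keyTangent(query=True, g=True, weightedTangents=True))  # should report weighted tangents on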
Thank you
I started working in the field of computational chemistry and I was asked to do Principal Component Analysis on some trajectories from molecular dynamics. I was told to use the MDAnalysis package, so I found a tutorial on their page and tried to follow it (with my own inputs, of course) to see whether it would work. I have never done an analysis like this and I am also new to Python coding.
I attached my code, inspired by the tutorial. But it doesn't work for me; it raises many errors. One of the errors is that it can't take my inputs (the topology is a PDB file, the coordinates an XTC file), even though those formats are listed among the supported formats; another error is that "class PCA" is not defined.
I didn't find much from other people about doing PCA with MDAnalysis, so I hoped I could find someone here who has done something like this and could help me. I have already tried related subreddits, but without result.
from __future__ import division, absolute_import
import MDAnalysis as mda
import MDAnalysis.analysis.pca as pca
from six.moves import range
import warnings
import numpy as np
import scipy.integrate
from MDAnalysis import Universe
from MDAnalysis.analysis.align import _fit_to
from MDAnalysis.lib.log import ProgressMeter

u = mda.Universe("L22trial.pdb", "L22trial.xtc")
PCA = mda.analysis.pca.PCA

class PCA():
    pca = PCA(u, select='backbone').run()
    pca_space = pca.transform(u.select_atoms('backbone'))

    def __init__(self, universe, select='all', align=False, mean=None,
                 n_components=None, **kwargs):
        super(PCA, self).__init__(universe.trajectory, **kwargs)
        self._u = universe
        self.align = align
        self._calculated = False
        self.n_components = n_components
        self._select = select
        self._mean = mean

    def _prepare(self):
        self._u.trajectory[self.start]
        self._reference = self._u.select_atoms(self._select)
        self._atoms = self._u.select_atoms(self._select)
        self._n_atoms = self._atoms.n_atoms

        if self._mean is None:
            self.mean = np.zeros(self._n_atoms*3)
            self._calc_mean = True
        else:
            self.mean = self._mean.positions
            self._calc_mean = False

        if self.n_frames == 1:
            raise ValueError('No covariance information can be gathered from a single trajectory frame.\n')
        n_dim = self._n_atoms * 3
        self.cov = np.zeros((n_dim, n_dim))
        self._ref_atom_positions = self._reference.positions
        self._ref_cog = self._reference.center_of_geometry()
        self._ref_atom_positions -= self._ref_cog

        if self._calc_mean:
            interval = int(self.n_frames // 100)
            interval = interval if interval > 0 else 1
            format = ("Mean Calculation Step %(step)5d/%(numsteps)d [%(percentage)5.1f%%]")
            mean_pm = ProgressMeter(self.n_frames if self.n_frames else 1, interval=interval, verbose=self._verbose, format=format)
            for i, ts in enumerate(self._u.trajectory[self.start:self.stop:self.step]):
                if self.align:
                    mobile_cog = self._atoms.center_of_geometry()
                    mobile_atoms, old_rmsd = _fit_to(self._atoms.positions, self._ref_atom_positions, self._atoms, mobile_com=mobile_cog, ref_com=self._ref_cog)
                else:
                    self.mean += self._atoms.positions.ravel()
                mean_pm.echo(i)
            self.mean /= self.n_frames

        self.mean_atoms = self._atoms
        self.mean_atoms.positions = self._atoms.positions

    def _single_frame(self):
        if self.align:
            mobile_cog = self._atoms.center_of_geometry()
            mobile_atoms, old_rmsd = _fit_to(self._atoms.positions, self._ref_atom_positions, self._atoms, mobile_com=mobile_cog, ref_com=self._ref_cog)
            x = mobile_atoms.positions.ravel()
        else:
            x = self._atoms.positions.ravel()
        x -= self.mean
        self.cov += np.dot(x[:, np.newaxis], x[:, np.newaxis].T)

    def _conclude(self):
        self.cov /= self.n_frames - 1
        e_vals, e_vects = np.linalg.eig(self.cov)
        sort_idx = np.argsort(e_vals)[::-1]
        self.variance = e_vals[sort_idx]
        self.variance = self.variance[:self.n_components]
        self.p_components = e_vects[:self.n_components, sort_idx]
        self.cumulated_variance = (np.cumsum(self.variance) / np.sum(self.variance))
        self._calculated = True

    def transform(self, atomgroup, n_components=None, start=None, stop=None, step=None):
        if not self._calculated:
            raise ValueError('Call run() on the PCA before using transform')
        if isinstance(atomgroup, Universe):
            atomgroup = atomgroup.atoms
        if(self._n_atoms != atomgroup.n_atoms):
            raise ValueError('PCA has been fit for {} atoms. Your atomgroup has {} atoms'.format(self._n_atoms, atomgroup.n_atoms))
        if not (self._atoms.types == atomgroup.types).all():
            warnings.warn('Atom types do not match with types used to fit PCA')

        traj = atomgroup.universe.trajectory
        start, stop, step = traj.check_slice_indices(start, stop, step)
        n_frames = len(range(start, stop, step))
        dim = (n_components if n_components is not None else self.p_components.shape[1])
        dot = np.zeros((n_frames, dim))

        for i, ts in enumerate(traj[start:stop:step]):
            xyz = atomgroup.positions.ravel() - self.mean
            dot[i] = np.dot(xyz, self.p_components[:, :n_components])
        return dot

def cosine_content(pca_space, i):
    t = np.arange(len(pca_space))
    T = len(pca_space)
    cos = np.cos(np.pi * t * (i + 1) / T)
    return ((2.0 / T) * (scipy.integrate.simps(cos*pca_space[:, i])) ** 2 /
            scipy.integrate.simps(pca_space[:, i] ** 2))
It seems you copied and pasted the PCA class itself. My guess is that you don't need to do this (I have never used that module, so it's just a guess).
The documentation (https://www.mdanalysis.org/docs/documentation_pages/analysis/pca.html) seems to indicate the only thing you need to do is the following:
import MDAnalysis as mda
import MDAnalysis.analysis.pca as pca
u = mda.Universe("L22trial.pdb", "L22trial.xtc")
mypca = pca.PCA(u, select='backbone').run()
pca_space = mypca.transform(u.select_atoms('backbone'))
If you get an error message like "No module named 'MDAnalysis.analysis.pca.PCA'; 'MDAnalysis.analysis.pca' is not a package", it means what it says :-).
That means there is no package named MDAnalysis on your computer. To fix this you need to install it with pip, or with conda if you use the conda package manager. See this link: https://www.mdanalysis.org/pages/installation_quick_start/
Looking at the link you took inspiration from, https://www.mdanalysis.org/docs/_modules/MDAnalysis/analysis/pca.html, it confirms my first guess, and I think my answer should let you use that package.
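As a side note, the cosine_content function at the end of the pasted code also appears to ship with the module as a standalone helper, so it does not need to be re-implemented either; a short sketch reusing the pca_space array from above:

import MDAnalysis.analysis.pca as pca

# Cosine content of the projection on the first principal component;
# values close to 1 usually indicate random-walk-like, poorly converged sampling.
cc = pca.cosine_content(pca_space, 0)
print(cc)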
from subprocess import check_output
import csv, operator

extinction_pct = operator.itemgetter('AOT 500', 'AOT 675', 'AOT 870', 'AOT 936', 'AOT 1020')

with open('csv_export.csv') as f_csv:
    reader = csv.DictReader(f_csv)
    for row in reader:
        with open("INPUT", 'w') as f_in:
            f_in.write("&INPUT\n")
            f_in.write("WLINF = 0.250\n")  # lower frequency value
            f_in.write("WLSUP = 4.0\n")    # highest frequency value
            f_in.write("WLINC = 0.5\n")    # wavelength increment
            f_in.write("IDAY = 289\n")     # computing for a specific day
            #f_in.write("ALAT = {Lat}\n".format(**row))  # for Python versions less than 3.6
            f_in.write(f"ALAT = {row['Lat']}\n")   # latitude of the location
            #f_in.write("ALON = {Long}\n".format(**row))  # for Python versions less than 3.6
            f_in.write(f"ALON = {row['Long']}\n")  # longitude of the location
            f_in.write("IDATM = 3\n")      # atmospheric model 2 - mid latitude summer
            f_in.write("ISALB = 5\n")      # surface albedo feature
            f_in.write("IAER = 5\n")       # boundary layer aerosol type selection - 5 - user defined spectral dependence of BLA
            f_in.write("WLBAER = .500,.675,.870,.936,1.02\n")  # wavelength points for IAER
            f_in.write("WBAER = 5*0.9\n")  # single scattering albedo
            f_in.write("GBAER = 5*0.8\n")  # asymmetric factor used with IAER
            #f_in.write("TIME = {sama}\n".format(**row))  # for Python versions less than 3.6
            f_in.write(f"TIME = {row['sama']}\n")  # Time in IST format (-5.30hr)
            #f_in.write("QBAER = {}\n".format(','.join(extinction_pct(row))))  # for Python versions less than 3.6
            f_in.write(f"QBAER = {','.join(extinction_pct(row))}\n")  # extinction efficiency percentage
            f_in.write("ZOUT = 0.0,15.0\n")  # TOA defining
            f_in.write("/\n")
        check_output('sbdart >> output1.csv', shell=True)  # sbdart is the program, and output1.csv is the output file
This is my code, with help from @wwii.
My last line, the check_output call, doesn't write anything to my output file at all. What could be the issue?
Thanks.
sbdart is a program that takes the INPUT file and prints its output to the command line.
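For what it's worth, one way to take the shell redirection out of the picture is to let Python capture sbdart's output and append it to the file itself; a small sketch (assuming sbdart reads ./INPUT from the current working directory):

import subprocess

# Run sbdart and append its stdout to output1.csv from Python,
# instead of relying on the shell's ">>" redirection.
with open('output1.csv', 'a') as f_out:
    subprocess.run(['sbdart'], stdout=f_out, check=True)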
Using the method provided here, you can try this:
import subprocess

# universal_newlines=True makes communicate() accept a str instead of bytes.
proc = subprocess.Popen('cmd.exe', stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                        universal_newlines=True)
stdout, stderr = proc.communicate('sbdart >> output.csv')
Make sure you use the full path of sbdart, navigate to the folder containing sbdart, or add the location of sbdart to the system path.
There are a bunch of other methods in the link provided
Working on Linux with python 3.5
Assume sbdart is executable and we have a file called output1.csv
sbdart looks like this for our test case:
echo $1
echo "$(cat $1)"
output1.csv is as follows:
&INPUT
WLINF = 0.250
WLSUP = 4.0
WLINC = 0.5
IDAY = 289
ALAT = {row['Lat']}
ALON = {row['Long']}
IDATM = 3
ISALB = 5
IAER = 5
WLBAER = .500,.675,.870,.936,1.02
WBAER = 5*0.9
GBAER = 5*0.8
TIME = {row['sama']}
QBAER = {','.join(extinction_pct(row))}
ZOUT = 0.0,15.0
/
>>> import subprocess
>>> subprocess.check_output(['./sbdart output1.csv'],shell=True)
b"output1.csv\n&INPUT\nWLINF = 0.250\nWLSUP = 4.0\nWLINC = 0.5\nIDAY = 289\nALAT = {row['Lat']}\nALON = {row['Long']}\nIDATM = 3\nISALB = 5\nIAER = 5\nWLBAER = .500,.675,.870,.936,1.02\nWBAER = 5*0.9\nGBAER = 5*0.8\nTIME = {row['sama']}\nQBAER = {','.join(extinction_pct(row))}\nZOUT = 0.0,15.0\n/\n"
>>>