I am trying to use Stockfish to evaluate chess positions from FEN notation, all in Python. I am mainly using two libraries: pgnToFen (found on GitHub here: https://github.com/SindreSvendby/pgnToFen) and the MIT-licensed Stockfish wrapper (here: https://github.com/zhelyabuzhsky/stockfish). After fixing many bugs I have run into problem after problem. Stockfish not only can't analyse this FEN position (3b2k1/1p3pp1/8/3pP1P1/pP3P2/P2pB3/6K1/8 b f3 -), it loops on it forever! "No worries," I thought, changing the source code should be doable. I changed _put(), but I am unable to put dummy values in because stdin.flush() won't execute once I give it those values, which means I don't think I can even skip to the next row in my dataframe. :( The code I changed is below.
def _put(self, command: str, tmp_time) -> None:
    if not self.stockfish.stdin:
        raise BrokenPipeError()
    self.stockfish.stdin.write(f"{command}\n")
    try:
        self.stockfish.stdin.flush()
    except:
        if command != "quit":
            self.stockfish.stdin.write('isready\n')
            try:
                time.sleep(tmp_time)
                self.stockfish.stdin.flush()
            except:
                #print ('Imma head out', file=sys.stderr)
                raise ValueError('Imma head out...')
                #sys.stderr.close()
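(For reference, self.stockfish in this wrapper is the underlying subprocess.Popen object, which the code above suggests. A rough alternative sketch, not the library's official API: instead of retrying the flush, check whether the engine process has already died and raise immediately, so the caller can catch the error and skip the row.)

def _put(self, command: str) -> None:
    if not self.stockfish.stdin:
        raise BrokenPipeError()
    # poll() returns the engine's exit code once the process has terminated,
    # and None while it is still running.
    if self.stockfish.poll() is not None:
        raise BrokenPipeError("Stockfish process has exited")
    self.stockfish.stdin.write(f"{command}\n")
    self.stockfish.stdin.flush()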
def get_evaluation(self) -> dict:
    """Evaluates current position

    Returns:
        A dictionary of the current advantage with "type" as "cp" (centipawns) or "mate" (checkmate in)
    """
    evaluation = dict()
    fen_position = self.get_fen_position()
    if "w" in fen_position:  # w can only be in FEN if it is whites move
        compare = 1
    else:  # stockfish shows advantage relative to current player, convention is to do white positive
        compare = -1
    self._put(f"position {fen_position}", 5)
    self._go()
    x = 0
    while True:
        x = x + 1
        text = self._read_line()
        #print(text)
        splitted_text = text.split(" ")
        if splitted_text[0] == "info":
            for n in range(len(splitted_text)):
                if splitted_text[n] == "score":
                    evaluation = {
                        "type": splitted_text[n + 1],
                        "value": int(splitted_text[n + 2]) * compare,
                    }
        elif splitted_text[0] == "bestmove":
            return evaluation
        elif x == 500:
            evaluation = {
                "type": 'cp',
                "value": 10000,
            }
            return evaluation
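(A related defensive tweak, again just a sketch against the wrapper's internals, with self.stockfish being the underlying subprocess.Popen, rather than its documented API: make the read loop give up as soon as the engine process is gone, so get_evaluation() returns instead of spinning.)

while True:
    x = x + 1
    # If the engine process died (for example after being fed a position it
    # cannot handle), stop reading and return whatever was parsed so far.
    if self.stockfish.poll() is not None:
        return evaluation
    text = self._read_line()
    # ... rest of the loop body unchanged ...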
And last but not least, the change to the __init__ constructor below:
self._stockfish_major_version: float = float(self._read_line().split(" ")[1])
And the code into which I am importing all of this is below; this is where the errors pop up.
import pandas as pd
import re
import nltk
import numpy as np
from stockfish import Stockfish
import os
import sys
sys.path.insert(0, r'C:\Users\path\to\pgntofen')
import pgntofen
#nltk.download('punkt')
#Changed models.py for major version line 39 in stockfish from int to float
stockfish = Stockfish(r"C:\Users\path\to\Stockfish.exe")
file = r'C:\Users\path\to\selenium-pandas output.csv'
chunksize = 10 ** 6
for chunk in pd.read_csv(file, chunksize=chunksize):
    for index, row in chunk.iterrows():
        FullMovesStr = str(row['FullMoves'])
        FullMovesStr = FullMovesStr.replace('+', '')
        if "e.p" in FullMovesStr:
            row.to_csv(r'C:\Users\MyName\Logger.csv', header=None, index=False, mode='a')
            print('Enpassant')
            continue
        tokens = nltk.word_tokenize(FullMovesStr)
        movelist = []
        for tokenit in range(len(tokens)):
            if "." in str(tokens[tokenit]):
                try:
                    tokenstripped = re.sub(r"[0-9]+\.", "", tokens[tokenit])
                    token = [tokenstripped, tokens[tokenit+1]]
                    movelist.append(token)
                except:
                    continue
            else:
                continue
        DFMoves = pd.DataFrame(movelist, columns=[['WhiteMove', 'BlackMove']])
        DFMoves['index'] = row['index']
        DFMoves['Date'] = row['Date']
        DFMoves['White'] = row['White']
        DFMoves['Black'] = row['Black']
        DFMoves['W ELO'] = row['W ELO']
        DFMoves['B ELO'] = row['B ELO']
        DFMoves['Av ELO'] = row['Av ELO']
        DFMoves['Event'] = row['Event']
        DFMoves['Site'] = row['Site']
        DFMoves['ECO'] = row['ECO']
        DFMoves['Opening'] = row['Opening']
        pd.set_option('display.max_rows', DFMoves.shape[0]+1)
        print(DFMoves[['WhiteMove', 'BlackMove']])
        seqmoves = []
        #seqmovesBlack = []
        evalmove = []
        pgnConverter = pgntofen.PgnToFen()
        #stockfish.set_fen_position("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
        #rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1
        for index, row in DFMoves.iterrows():
            try:
                stockfish.set_fen_position("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
            except:
                evalmove.append("?")
                continue
            #stockfish.set_fen_position("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
            pgnConverter.resetBoard()
            WhiteMove = str(row['WhiteMove'])
            BlackMove = str(row['BlackMove'])
            if index == 0:
                PGNMoves1 = [WhiteMove]
                seqmoves.append(WhiteMove)
                #seqmoves.append(BlackMove)
            else:
                seqmoves.append(WhiteMove)
                #seqmoves.append(BlackMove)
                PGNMoves1 = seqmoves.copy()
            #print(seqmoves)
            try:
                pgnConverter.pgnToFen(PGNMoves1)
                fen = pgnConverter.getFullFen()
            except:
                break
            try:
                stockfish.set_fen_position(fen)
                print(stockfish.get_board_visual())
                evalpos = stockfish.get_evaluation()
                evalmove.append(evalpos)
            except:
                pass
            try:
                stockfish.set_fen_position("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
            except:
                evalmove.append("?")
                continue
            pgnConverter.resetBoard()
            if index == 0:
                PGNMoves2 = [WhiteMove, BlackMove]
                seqmoves.append(BlackMove)
            else:
                seqmoves.append(BlackMove)
                PGNMoves2 = seqmoves.copy()
            try:
                pgnConverter.pgnToFen(PGNMoves2)
                fen = pgnConverter.getFullFen()
            except:
                break
            try:
                stockfish.set_fen_position(fen)
                print(stockfish.get_board_visual())
                evalpos = stockfish.get_evaluation()
                print(evalpos)
                evalmove.append(evalpos)
            except:
                pass
        #DFMoves['EvalWhite'] = evalwhite
        #DFMoves['EvalBlack'] = evalblack
        print(evalmove)
So the detailed question is: how do I get stockfish.get_evaluation() to just skip, or better yet handle correctly, this FEN position ( 3b2k1/1p3pp1/8/3pP1P1/pP3P2/P2pB3/6K1/8 b f3 - )? I have been working on this problem for quite a while, so any insight would be very much appreciated.
My specs are Windows 10, Python 3.9, an Intel(R) Core(TM) i9-10980XE CPU @ 3.00 GHz, and 64.0 GB of RAM.
Thanks :)
OK. It seems your FEN is invalid (3b2k1/1p3pp1/8/3pP1P1/pP3P2/P2pB3/6K1/8 b f3 -): a full FEN has six fields (piece placement, side to move, castling rights, en passant square, halfmove clock, fullmove number), while this one has only four, with "f3" sitting in the castling slot (it looks like the intended en passant square) and the move counters missing. So check that. Also, the python-chess library (https://python-chess.readthedocs.io/en/latest/index.html) lets you work with both FEN and chess engines. Pretty cool, no? Here is an example of these two fantastic tools:
import chess
import chess.engine
import chess.pgn
pgn = open("your_pgn_file.pgn")
game = chess.pgn.read_game(pgn)
engine = chess.engine.SimpleEngine.popen_uci("your_stockfish_path.exe")
# Iterate through all moves, play them on a board and analyse them.
board = game.board()
for move in game.mainline_moves():
    board.push(move)
    print(engine.analyse(board, chess.engine.Limit(time=0.1))["score"])
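You can also use python-chess to validate a FEN up front and simply skip bad rows before the engine ever sees them. A small sketch (the FEN is the one from the question; the engine path is a placeholder):

import chess
import chess.engine

fen = "3b2k1/1p3pp1/8/3pP1P1/pP3P2/P2pB3/6K1/8 b f3 -"
engine = chess.engine.SimpleEngine.popen_uci("your_stockfish_path.exe")

try:
    board = chess.Board(fen)   # raises ValueError for a malformed FEN
except ValueError:
    print("Invalid FEN, skipping:", fen)
else:
    info = engine.analyse(board, chess.engine.Limit(time=0.1))
    print(info["score"])

engine.quit()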
Related
I'm writing code to generate features of ligands. The code is:
def parsePDB(self):
    top = self.pdb.topology
    residues = [str(residue)[:3] for residue in top.residues]
    residues_cp = residues.copy()

    # number of all groups in the complex except the ligand
    LIG_number = residues.index('LIG')
    self.residues_indices = [i for i in range(top.n_residues) if i != LIG_number]

    # Get the types of atoms in the protein and ligand
    self.rec_indices = top.select('protein')
    self.lig_indices = top.select('resname LIG')
    table, bond = top.to_dataframe()
    self.all_ele = table['element']
    self.lig_ele = table['element'][self.lig_indices]
    H_num = []
    for num, i in enumerate(self.all_ele):
        if i == 'H':
            H_num.append(num)

    # Get the serial number of the atom in each residue or group
    removes = []
    for i in self.residues_indices:
        atoms = top.residue(i).atoms
        each_atoms = [j.index for j in atoms]
        heavy_atoms = [x for x in each_atoms if not x in H_num]
        if len(heavy_atoms) == 0:
            removes.append(i)
        else:
            self.atoms_indices.append(heavy_atoms)
    if len(removes) != 0:
        for i in removes:
            self.residues_indices.remove(i)
    self.residues = [residues_cp[x] for x in self.residues_indices]

    # Get the 3D coordinates for all atoms
    self.xyz = self.pdb.xyz[0]
    return self
I'm getting this error:
OSError: The topology is loaded by filename extension, and the detected ".pd" format is not supported. Supported topology formats include ".pdb", ".pdb.gz", ".h5", ".lh5", ".prmtop", ".parm7", ".prm7", ".psf", ".mol2", ".hoomdxml", ".gro", ".arc", ".hdf5" and ".gsd".
Can anyone please suggest how to resolve this issue? Thanks.
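For context, the error above is raised by MDTraj, which picks a topology parser from the filename extension; it indicates the path being passed in ends in ".pd" rather than a supported suffix such as ".pdb". A minimal sketch of the idea (the file name is hypothetical, and the wrapper class holding self.pdb is only hinted at in the question):

import mdtraj as md

# MDTraj chooses the topology parser from the extension, so the file has to
# end in a supported suffix such as ".pdb" (rename it if necessary).
pdb = md.load("complex.pdb")    # works: extension recognised
# pdb = md.load("complex.pd")   # fails with the error above: ".pd" is unknown

# The loaded Trajectory then provides .topology and .xyz, which is what
# parsePDB() above reads from self.pdb.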
I need to get the same results in any iteration. I tried to use random.seed() in my script, but it doesn't work. How can I fix it?
My script:
import random
import statistics
import numpy as np
import pandas as pd

Oddr=[]
Oddr_fem=[]
Oddr_mal=[]
chi = []
chi_fem=[]
chi_mal=[]
for k in range(100):
    random.seed(10)
    result = []
    exclude_hlthy = []
    for i in set(sick['predicted_age']):
        sick_ppl = sick.index[sick['predicted_age'] == i].tolist()
        L_sick = len(sick_ppl)
        if L_sick == 0:
            continue
        hlth_peers = healthy[healthy.predicted_age == i]
        L_healthy = hlth_peers.shape[0]
        if L_healthy < len(sick_ppl):
            pass
        else:
            hlthy_subsample = list(np.random.choice([x for x in hlth_peers.index if not x in exclude_hlthy],
                                                    L_sick, replace = False))
            exclude_hlthy += hlthy_subsample
            result += hlthy_subsample
    table_ready = healthy.loc[result]
    whole_table = table_ready.append(sick, ignore_index=False)
    cross_tab = pd.crosstab(index=whole_table['dc013'], columns=whole_table['rate_aging'])
    oddsratio=(cross_tab[1][1]*cross_tab[0][0])/(cross_tab[1][0]*cross_tab[0][1])
    #Oddr += oddsratio
    Oddr.append(oddsratio)
In this script I get several different random whole_table tables from the same subsample.
On every iteration you are re-seeding with random.seed(10), so try moving that line out of the for loop; then it should work.
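A minimal sketch of that suggestion, with one extra note: the sampling in the script is done with np.random.choice, which draws from NumPy's random generator, so np.random.seed is what actually controls those draws (the stdlib random.seed does not affect them):

import random
import numpy as np

random.seed(10)     # seeds Python's stdlib generator (once, before the loop)
np.random.seed(10)  # seeds NumPy's generator, which np.random.choice uses

for k in range(100):
    # ... build the subsample with np.random.choice(...) as before ...
    pass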
I am trying to fill a table in a Word document from Python with DocxTemplate, and I am having some issues doing it properly. I want to use two dictionaries to fill the data in one table, shown in the figure below.
Table to fill
The two dictionaries are filled in a loop, and I write the template document at the end.
The input used to create my dictionaries is a DB extraction written in SQL.
My main issue is filling the table with the data held in the two different dictionaries.
In the code below I give the two dictionaries with example values in them.
# -*- coding: utf8 -*-
#
#
from docxtpl import DocxTemplate

if __name__ == "__main__":
    document = DocxTemplate("template.docx")
    DicoOccuTable = {'`num_carnet_adresses`': '`annuaire_telephonique`\n`carnet_adresses`\n`carnet_adresses_complement`',
                     '`num_eleve`': '`CFA_apprentissage_ctrl_coherence`\n`CFA_apprentissage_ctrl_examen`'}
    DicoChamp = {'`num_carnet_adresses`': 72, '`num_eleve`': 66}
    template_values = {}
    #
    template_values["keys"] = [[{"name": cle, "occu": val} for cle, val in DicoChamp.items()],
                               [{"table": vals} for cles, vals in DicoOccuTable.items()]]
    #
    document.render(template_values)
    document.save('output/' + nomTable.replace('`', '') + '.docx')
As a result, the two rows of the table are created but nothing is written in them...
I should add that I have only been working with Python for a week, so I am probably not handling the different objects properly here.
If you have any suggestions to help me, I would appreciate it!
I have put the loop that creates the dictionaries below; it may help you understand where I went wrong :)
for c in ChampList:
    with open("db_reference.sql", "r") as f:
        listTable = []
        line = f.readlines()
        for l in line:
            if 'CREATE TABLE' in l:
                begin = True
                linecreateTable = l
                x = linecreateTable.split()
                nomTable = x[2]
            elif c in l and begin == True:
                listTable.append(nomTable)
            elif ') ENGINE=MyISAM DEFAULT CHARSET=latin1;' in l:
                begin = False
    nbreOccu = len(listTable)
    Tables = "\n".join(listTable)
    DicoChamp.update({c: nbreOccu})
    DicoOccuTable.update({c: Tables})
    # DicoChamp = {c:nbreOccu}
template_values = {}
Thank you very much!
Finally, I found a solution to this problem. Here it is.
Instead of using two dictionaries, I created one dictionary with this structure:
Dico = { Champ : [Occu , Tables] }
The full code for creating the table is detailed below:
from docxtpl import DocxTemplate

document = DocxTemplate("template.docx")
template_values = {}
Context = {}
for c in ChampList:
    listTable = []
    nbreOccu = 0
    OccuTables = []
    with open("db_reference.sql", "r") as g:
        listTable = []
        ligne = g.readlines()
        for li in ligne:
            if 'CREATE TABLE' in li:
                begin = True
                linecreateTable2 = li
                y = linecreateTable2.split()
                nomTable2 = y[2]
            elif c in li and begin == True:
                listTable.append(nomTable2)
            elif ') ENGINE=MyISAM DEFAULT CHARSET=latin1;' in li:
                begin = False
            elif '/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;' in li:
                nbreOccu = len(listTable)
                inter = "\n".join(listTable)
                OccuTables.append(nbreOccu)
                OccuTables.append(inter)
                ChampNumPropre = c.replace('`', '')
                Context.update({ChampNumPropre: OccuTables})
            else:
                continue
template_values["keys"] = [{"label": cle, "cols": val} for cle, val in Context.items()]
#
document.render(template_values)
document.save('output/' + nomTable.replace('`', '') + '.docx')
And I used a table with the following structure :
I hope you will find your answers here and good luck !
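For readers reproducing this without the screenshot: docxtpl repeats table rows with its {%tr %} tags, so a rough, hypothetical single-row template matching the keys structure above could look like this inside the Word table (one expression per cell):

{%tr for k in keys %}
{{ k.label }}    {{ k.cols[0] }}    {{ k.cols[1] }}
{%tr endfor %}

The {%tr %} tags make docxtpl duplicate the whole table row for every entry in keys, which is what produces one filled line per field.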
I am using the parallel programming module for Python (pp). I have a function that returns an array, but when I print the variable that holds the result of the parallelized function, it prints "pp._Task object at 0x04696510" and not the value of the matrix.
Here is the code:
from __future__ import print_function
import scipy, pylab
from scipy.io.wavfile import read
import sys
import peakpicker as pea
import pp
import fingerprint as fhash
import matplotlib
import numpy as np
import tdft
import subprocess
import time
if __name__ == '__main__':
start=time.time()
#Peak picking dimensions
f_dim1 = 30
t_dim1 = 80
f_dim2 = 10
t_dim2 = 20
percentile = 80
base = 100 # lowest frequency bin used (peaks below are too common/not as useful for identification)
high_peak_threshold = 75
low_peak_threshold = 60
#TDFT parameters
windowsize = 0.008 #set the window size (0.008s = 64 samples)
windowshift = 0.004 #set the window shift (0.004s = 32 samples)
fftsize = 1024 #set the fft size (if srate = 8000, 1024 --> 513 freq. bins separated by 7.797 Hz from 0 to 4000Hz)
#Hash parameters
delay_time = 250 # 250*0.004 = 1 second#200
delta_time = 250*3 # 750*0.004 = 3 seconds#300
delta_freq = 128 # 128*7.797Hz = approx 1000Hz#80
#Time pair parameters
TPdelta_freq = 4
TPdelta_time = 2
#Load the stored data
database=np.loadtxt('database.dat')
songnames=np.loadtxt('songnames.dat', dtype=str, delimiter='\t')
separator = '.'
print('Please enter an audio sample file to identify: ')
userinput = raw_input('---> ')
subprocess.call(['ffmpeg','-y','-i',userinput, '-ac', '1','-ar', '8k', 'filesample.wav'])
sample = read('filesample.wav')
userinput = userinput.split(separator,1)[0]
print('Analyzing the audio sample: '+str(userinput))
srate = sample[0] #sample rate in samples/second
audio = sample[1] #audio data
spectrogram = tdft.tdft(audio, srate, windowsize, windowshift, fftsize)
mytime = spectrogram.shape[0]
freq = spectrogram.shape[1]
print('The size of the spectrogram is time: '+str(mytime)+' and freq: '+str(freq))
threshold = pea.find_thres(spectrogram, percentile, base)
peaks = pea.peak_pick(spectrogram,f_dim1,t_dim1,f_dim2,t_dim2,threshold,base)
print('The initial number of peaks is:'+str(len(peaks)))
peaks = pea.reduce_peaks(peaks, fftsize, high_peak_threshold, low_peak_threshold)
print('The reduced number of peaks is:'+str(len(peaks)))
#Store information for the spectrogram graph
samplePeaks = peaks
sampleSpectro = spectrogram
hashSample = fhash.hashSamplePeaks(peaks,delay_time,delta_time,delta_freq)
print('The dimensions of the hash matrix of the sample: '+str(hashSample.shape))
# tuple of all parallel python servers to connect with
ppservers = ()
#ppservers = ("10.0.0.1",)
if len(sys.argv) > 1:
ncpus = int(sys.argv[1])
# Creates jobserver with ncpus workers
job_server = pp.Server(ncpus, ppservers=ppservers)
else:
# Creates jobserver with automatically detected number of workers
job_server = pp.Server(ppservers=ppservers)
print ("Starting pp with", job_server.get_ncpus(), "workers")
print('Attempting to identify the sample audio clip.')
Here I call the function in fingerprint; the commented-out line worked, but when I try to parallelize it, it doesn't work:
timepairs = job_server.submit(fhash.findTimePairs, (database, hashSample, TPdelta_freq, TPdelta_time, ))
# timepairs = fhash.findTimePairs(database, hashSample, TPdelta_freq, TPdelta_time)
print (timepairs)
#Compute number of matches by song id to determine a match
numSongs = len(songnames)
songbins= np.zeros(numSongs)
numOffsets = len(timepairs)
offsets = np.zeros(numOffsets)
index = 0
for i in timepairs:
offsets[index]=i[0]-i[1]
index = index+1
songbins[i[2]] += 1
# Identify the song
#orderarray=np.column_stack((songbins,songnames))
#orderarray=orderarray[np.lexsort((songnames,songbins))]
q3=np.percentile(songbins, 75)
q1=np.percentile(songbins, 25)
j=0
for i in songbins:
if i>(q3+(3*(q3-q1))):
print("Result-> "+str(i)+":"+songnames[j])
j+=1
end=time.time()
print('Tiempo: '+str(end-start)+' s')
print("Time elapsed: ", +time.time() - start, "s")
fig3 = pylab.figure(1003)
ax = fig3.add_subplot(111)
ind = np.arange(numSongs)
width = 0.35
rects1 = ax.bar(ind,songbins,width,color='blue',align='center')
ax.set_ylabel('Number of Matches')
ax.set_xticks(ind)
xtickNames = ax.set_xticklabels(songnames)
matplotlib.pyplot.setp(xtickNames)
pylab.title('Song Identification')
fig3.show()
pylab.show()
print('The sample song is: '+str(songnames[np.argmax(songbins)]))
The function in fingerprint that I try to parallelize is:
def findTimePairs(hash_database, sample_hash, deltaTime, deltaFreq):
    "Find the matching pairs between sample audio file and the songs in the database"
    timePairs = []
    for i in sample_hash:
        for j in hash_database:
            if(i[0] > (j[0]-deltaFreq) and i[0] < (j[0] + deltaFreq)):
                if(i[1] > (j[1]-deltaFreq) and i[1] < (j[1] + deltaFreq)):
                    if(i[2] > (j[2]-deltaTime) and i[2] < (j[2] + deltaTime)):
                        timePairs.append((j[3], i[3], j[4]))
                    else:
                        continue
                else:
                    continue
            else:
                continue
    return timePairs
The complete error is:
Traceback (most recent call last):
  File "analisisPrueba.py", line 93, in <module>
    numOffsets = len(timepairs)
TypeError: object of type '_Task' has no len()
The submit() method submits a task to the server. What you get back is a reference to the task, not its result. (How could it return its result? submit() returns before any of that work has been done!) You should instead provide a callback function to receive the results. For example, timepairs.append is a function that will take the result and append it to the list timepairs.
timepairs = []
job_server.submit(fhash.findTimePairs, (database, hashSample, TPdelta_freq, TPdelta_time, ), callback=timepairs.append)
(Each findTimePairs call should calculate one result, in case that isn't obvious, and you should submit multiple tasks. Otherwise you're invoking all the machinery of Parallel Python for no reason. And make sure you call job_server.wait() to wait for all the tasks to finish before trying to do anything with your results. In short, read the documentation and some example scripts and make sure you understand how it works.)
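A rough sketch of that pattern (untested; it reuses names from the script above and assumes the database rows can be processed in independent chunks):

timepairs = []
chunks = np.array_split(database, job_server.get_ncpus())   # one chunk per worker

# Submit one task per chunk; each callback extends the shared list with
# that chunk's matching pairs (order does not matter here).
for chunk in chunks:
    job_server.submit(fhash.findTimePairs,
                      (chunk, hashSample, TPdelta_freq, TPdelta_time),
                      callback=timepairs.extend)

job_server.wait()   # block until every task has finished

Alternatively, each object returned by submit() is callable: calling it (for example result = task()) waits for that task to finish and returns its result.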
I have a question for some of you who are familiar with the Revit API and python:
I’ve been using the spring nodes package in Dynamo to create a rather large series of freeform objects, each in its own family. The way FamilyInstance.ByGeometry works, it takes a list of solids and creates a family instance for each using a template family file. The result is quite good. (Spring nodes can be found here: https://github.com/dimven/SpringNodes)
However, the drawback is that I now have roughly 200 separate instances, so making changes to each is rather painful. I thought at first it would be possible to use Dynamo to create a new subcategory and set the solid inside each family instance to this new subcategory. Unfortunately, I realized this is not possible, since Dynamo cannot be open in two different Revit environments simultaneously (the project I am working in and each instance of the family). This led me to look into whether I can do this using Python.
I have used Python in Rhino and can get along pretty well, but I am still learning the Revit API. Basically my idea would be to:
1. select a series of family instances in the Revit project environment
2. loop through each instance
3. save it to a specified location
4. create a new subcategory in each family instance (the subcategory would be the same for all the selected family instances)
5. select the solid in each instance
6. set the solid to this newly created subcategory
7. close the family instance and save
My question for you is does this sound like it is achievable based on your knowledge of the Revit API?
Many thanks for your time and advice.
UPDATE:
I've found a section in the Revit API help that describes what I'm looking to do: http://help.autodesk.com/view/RVT/2015/ENU/?guid=GUID-FBF9B994-ADCB-4679-B50B-2E9A1E09AA48
I've made a first pass at inserting this into the Python code of the Dynamo node. The rest of the code works fine except for the new section I'm adding (see below). Please excuse the variables; I am simply keeping with the logic of the original author of the code I am hacking:
(Note: the incoming variables are arrays.)
#set subcategory
try:
    #create new subcategory
    fam_subcat = famdoc.Settings.Categories.NewSubcategory(fam_cat, get_Item(subcat1.Name))
    #assign the material (fam_mat.Id) to the subcategory
    fam_subcat.Material = famdoc.GetElement(fam_mat.Id)
    #assign the subcategory to the element (s2)
    s2.Subcategory = fam_subcat
except: pass
Any help or advice with this section of code would be much appreciated.
UPDATE:
See full code below for context of the section in question:
#Copyright(c) 2015, Dimitar Venkov
# @5devene, dimitar.ven@gmail.com
import clr
import System
from System.Collections.Generic import *
pf_path = System.Environment.GetFolderPath(System.Environment.SpecialFolder.ProgramFilesX86)
import sys
sys.path.append("%s\IronPython 2.7\Lib" %pf_path)
import traceback
clr.AddReference('ProtoGeometry')
from Autodesk.DesignScript.Geometry import *
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
doc = DocumentManager.Instance.CurrentDBDocument
app = DocumentManager.Instance.CurrentUIApplication.Application
clr.AddReference("RevitAPI")
from Autodesk.Revit.DB import *
from Autodesk.Revit.DB.Structure import StructuralType
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
clr.ImportExtensions(Revit.GeometryConversion)
def tolist(obj1):
if hasattr(obj1,"__iter__"): return obj1
else: return [obj1]
def output1(l1):
if len(l1) == 1: return l1[0]
else: return l1
def PadLists(lists):
len1 = max([len(l) for l in lists])
for i in xrange(len(lists)):
if len(lists[i]) == len1:
continue
else:
len2 = len1 - len(lists[i])
for j in xrange(len2):
lists[i].append(lists[i][-1])
return lists
class FamOpt1(IFamilyLoadOptions):
def __init__(self):
pass
def OnFamilyFound(self,familyInUse, overwriteParameterValues):
return True
def OnSharedFamilyFound(self,familyInUse, source, overwriteParameterValues):
return True
geom = tolist(IN[0])
fam_path = IN[1]
names = tolist(IN[2])
category = tolist(IN[3])
material = tolist(IN[4])
isVoid = tolist(IN[5])
subcategory = tolist(IN[6])
isRvt2014 = False
if app.VersionName == "Autodesk Revit 2014": isRvt2014 = True
units = doc.GetUnits().GetFormatOptions(UnitType.UT_Length).DisplayUnits
factor = UnitUtils.ConvertToInternalUnits(1,units)
acceptable_views = ["ThreeD", "FloorPlan", "EngineeringPlan", "CeilingPlan", "Elevation", "Section"]
origin = XYZ(0,0,0)
str_typ = StructuralType.NonStructural
def NewForm_background(s1, name1, cat1, isVoid1, mat1, subcat1):
t1 = TransactionManager.Instance
TransactionManager.ForceCloseTransaction(t1)
famdoc = doc.Application.NewFamilyDocument(fam_path)
message = None
temp_path = System.IO.Path.GetTempPath()
sat_path = "%s%s.sat" % (temp_path, name1)
try:
if factor != 1:
s1 = s1.Scale(factor)
sat1 = Geometry.ExportToSAT(s1, sat_path)
satOpt = SATImportOptions()
satOpt.Placement = ImportPlacement.Origin
satOpt.Unit = ImportUnit.Foot
view_fec = FilteredElementCollector(famdoc).OfClass(View)
view1 = None
for v in view_fec:
if str(v.ViewType) in acceptable_views:
view1 = v
break
t1.EnsureInTransaction(famdoc)
satId = famdoc.Import(sat1, satOpt, view1)
opt1 = Options()
opt1.ComputeReferences = True
el1 = famdoc.GetElement(satId)
geom1 = el1.get_Geometry(opt1)
enum = geom1.GetEnumerator()
enum.MoveNext()
geom2 = enum.Current.GetInstanceGeometry()
enum2 = geom2.GetEnumerator()
enum2.MoveNext()
s1 = enum2.Current
famdoc.Delete(satId)
TransactionManager.ForceCloseTransaction(t1)
System.IO.File.Delete(sat_path)
except:
message = traceback.format_exc()
pass
if message == None:
try:
save_path = "%s%s.rfa" % (temp_path, name1)
SaveAsOpt = SaveAsOptions()
SaveAsOpt.OverwriteExistingFile = True
t1.EnsureInTransaction(famdoc)
#set the category
try:
fam_cat = famdoc.Settings.Categories.get_Item(cat1.Name)
famdoc.OwnerFamily.FamilyCategory = fam_cat
except: pass
s2 = FreeFormElement.Create(famdoc,s1)
if isVoid1:
void_par = s2.get_Parameter("Solid/Void")
void_par.Set(1)
void_par2 = famdoc.OwnerFamily.get_Parameter("Cut with Voids When Loaded")
void_par2.Set(1)
else: #voids do not have a material value
try:
mat_fec = FilteredElementCollector(famdoc).OfClass(Material)
for m in mat_fec:
if m.Name == mat1:
fam_mat = m
break
mat_par = s2.get_Parameter("Material")
mat_par.Set(fam_mat.Id)
except: pass
#set subcategory
try:
#create new subcategory
fam_subcat = document.Settings.Categories.NewSubcategory(document.OwnerFamily.FamilyCategory, get_Item(subcat1.Name))
#assign the material (fam_mat.Id) to the subcategory
fam_subcat.Material = famdoc.GetElement(fam_mat.Id)
#assign the subcategory to the element (s2)
s2.Subcategory = fam_subcat
except: pass
TransactionManager.ForceCloseTransaction(t1)
famdoc.SaveAs(save_path, SaveAsOpt)
family1 = famdoc.LoadFamily(doc, FamOpt1())
famdoc.Close(False)
System.IO.File.Delete(save_path)
symbols = family1.Symbols.GetEnumerator()
symbols.MoveNext()
symbol1 = symbols.Current
t1.EnsureInTransaction(doc)
if not symbol1.IsActive: symbol1.Activate()
inst1 = doc.Create.NewFamilyInstance(origin, symbol1, str_typ)
TransactionManager.ForceCloseTransaction(t1)
return inst1.ToDSType(False), family1.ToDSType(False)
except:
message = traceback.format_exc()
return message
else:
return message
def NewForm_background_R16(s1, name1, cat1, isVoid1, mat1, subcat1):
t1 = TransactionManager.Instance
TransactionManager.ForceCloseTransaction(t1)
famdoc = doc.Application.NewFamilyDocument(fam_path)
message = None
temp_path = System.IO.Path.GetTempPath()
sat_path = "%s%s.sat" % (temp_path, name1)
try:
if factor != 1:
s1 = s1.Scale(factor)
sat1 = Geometry.ExportToSAT(s1, sat_path)
satOpt = SATImportOptions()
satOpt.Placement = ImportPlacement.Origin
satOpt.Unit = ImportUnit.Foot
view_fec = FilteredElementCollector(famdoc).OfClass(View)
view1 = None
for v in view_fec:
if str(v.ViewType) in acceptable_views:
view1 = v
break
t1.EnsureInTransaction(famdoc)
satId = famdoc.Import(sat1, satOpt, view1)
opt1 = Options()
opt1.ComputeReferences = True
el1 = famdoc.GetElement(satId)
geom1 = el1.get_Geometry(opt1)
enum = geom1.GetEnumerator()
enum.MoveNext()
geom2 = enum.Current.GetInstanceGeometry()
enum2 = geom2.GetEnumerator()
enum2.MoveNext()
s1 = enum2.Current
famdoc.Delete(satId)
TransactionManager.ForceCloseTransaction(t1)
System.IO.File.Delete(sat_path)
except:
message = traceback.format_exc()
pass
if message == None:
try:
save_path = "%s%s.rfa" % (temp_path, name1)
SaveAsOpt = SaveAsOptions()
SaveAsOpt.OverwriteExistingFile = True
t1.EnsureInTransaction(famdoc)
#set the category
try:
fam_cat = famdoc.Settings.Categories.get_Item(cat1.Name)
famdoc.OwnerFamily.FamilyCategory = fam_cat
except: pass
s2 = FreeFormElement.Create(famdoc,s1)
if isVoid1:
void_par = s2.LookupParameter("Solid/Void")
void_par.Set(1)
void_par2 = famdoc.OwnerFamily.LookupParameter("Cut with Voids When Loaded")
void_par2.Set(1)
else: #voids do not have a material value
try:
mat_fec = FilteredElementCollector(famdoc).OfClass(Material)
for m in mat_fec:
if m.Name == mat1:
fam_mat = m
break
mat_par = s2.LookupParameter("Material")
mat_par.Set(fam_mat.Id)
except: pass
#apply same subcategory code as before
#set subcategory
try:
#create new subcategory
fam_subcat = famdoc.Settings.Categories.NewSubcategory(fam_cat, get_Item(subcat1.Name))
#assign the material (fam_mat.Id) to the subcategory
fam_subcat.Material = famdoc.GetElement(fam_mat.Id)
#assign the subcategory to the element (s2)
s2.Subcategory = fam_subcat
except: pass
TransactionManager.ForceCloseTransaction(t1)
famdoc.SaveAs(save_path, SaveAsOpt)
family1 = famdoc.LoadFamily(doc, FamOpt1())
famdoc.Close(False)
System.IO.File.Delete(save_path)
symbols = family1.GetFamilySymbolIds().GetEnumerator()
symbols.MoveNext()
symbol1 = doc.GetElement(symbols.Current)
t1.EnsureInTransaction(doc)
if not symbol1.IsActive: symbol1.Activate()
inst1 = doc.Create.NewFamilyInstance(origin, symbol1, str_typ)
TransactionManager.ForceCloseTransaction(t1)
return inst1.ToDSType(False), family1.ToDSType(False)
except:
message = traceback.format_exc()
return message
else:
return message
if len(geom) == len(names) == len(category) == len(isVoid) == len(material) == len(subcategory):
if isRvt2014:
OUT = output1(map(NewForm_background, geom, names, category, isVoid, material, subcategory))
else:
OUT = output1(map(NewForm_background_R16, geom, names, category, isVoid, material, subcategory))
elif len(geom) == len(names):
padded = PadLists((geom, category, isVoid, material, subcategory))
p_category = padded[1]
p_isVoid = padded[2]
p_material = padded[3]
p_subcategory = padded [4]
if isRvt2014:
OUT = output1(map(NewForm_background, geom, names, p_category, p_isVoid, p_material, p_subcategory))
else:
OUT = output1(map(NewForm_background_R16, geom, names, p_category, p_isVoid, p_material, subcategory))
else: OUT = "Make sure that each geometry\nobject has a unique family name."
Update:
Was able to get it working:
try:
    #create new subcategory
    fam_subcat = famdoc.Settings.Categories.NewSubcategory(famdoc.OwnerFamily.FamilyCategory, subcat1)
    #assign the material (fam_mat.Id) to the subcategory
    #fam_subcat.Material = famdoc.GetElement(fam_mat.Id)
    #assign the subcategory to the element (s2)
    s2.Subcategory = fam_subcat
except: pass
As I answered on your initial query per email, what you are aiming for sounds perfectly feasible to me in the Revit API. Congratulations on getting as far as you have. Looking at the link to the Revit API help file and developer guide that you cite above, it seems that the code has to be executed in the family document while defining the family. The context in which you are trying to execute it is not clear. Have you used EditFamily to open the family definition document? What context are you executing in?
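To make that concrete, here is a rough, untested sketch (plain Revit API, reusing names from the script above; "MySubcategory" is only a placeholder) that opens an already-loaded family with EditFamily, creates the subcategory inside the family document, assigns it to the FreeFormElement, and loads the family back into the project:

# family1 is an Autodesk.Revit.DB.Family already present in the project document 'doc'
famdoc = doc.EditFamily(family1)
t = Transaction(famdoc, "Add subcategory")
t.Start()
fam_cat = famdoc.OwnerFamily.FamilyCategory
fam_subcat = famdoc.Settings.Categories.NewSubcategory(fam_cat, "MySubcategory")
# Move every free-form solid in the family onto the new subcategory.
for elem in FilteredElementCollector(famdoc).OfClass(FreeFormElement):
    elem.Subcategory = fam_subcat
t.Commit()
famdoc.LoadFamily(doc, FamOpt1())   # FamOpt1 is the IFamilyLoadOptions class defined above
famdoc.Close(False)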