PyQt5: how to start multiprocessing, but not in the main thread - python

I am writing a program that loads data from an Excel file into some system via an API.
The function that fetches data from Excel and sends it via the API runs in a separate thread to keep the GUI responsive.
Loading via the API is a long process; most of the time is spent sending requests and waiting for API responses.
I would like to send each Excel row in a separate process. How can I do that?
def api_import(k, sheet, number_columns, items_number, ok, total, total_success):
    item = sheet[get_column_letter(3)+str(k)].value
    templ_id = sheet[get_column_letter(number_columns)+str(k)].value
    item_id = get_item_id(item)['item_id']
    query = {"template_id": str(templ_id)}
    resp = templ_attach(item_id, query)
    if resp == 200:
        ok += 1
    total += 1
    item_create_param = 0
    item_update_param = 0
    item_error = 0
    for l in range(number_columns):
        if l < 4:
            pass
        else:
            param_name = sheet[get_column_letter(l)+str(1).upper().replace(" ", "")].value
            param_description = sheet[get_column_letter(l)+str(2)].value
            param_value = sheet[get_column_letter(l)+str(k)].value
            param_type = sheet[get_column_letter(l)+str(3)].value
            if "[T]" in param_type:
                param_type = 4
                query = {"name": str(param_name), "type": param_type, "description": str(param_description), "text_value": str(param_value)}
            elif "[D]" in param_type:
                param_type = 0
                query = {"name": str(param_name), "type": param_type, "description": str(param_description), "numeric_value": param_value}
            else:
                param_type = 1
                query = {"name": str(param_name), "type": param_type, "description": str(param_description), "logical_value": param_value}
            resp = input_param(item_id, query)
            if resp == 201:
                item_create_param += 1
            elif resp == 202:
                item_update_param += 1
            else:
                item_error += 1
    if item_error == 0:
        total_success += 1
    return {'total': total, 'total_success': total_success, 'ok': ok}
class MainWindow(QDialog):
    def __init__(self):
        super(MainWindow, self).__init__()
        loadUi("gui.ui", self)
        self.browse.clicked.connect(self.browsefiles)
        self.load.clicked.connect(self.send)

    def browsefiles(self):
        fname = QFileDialog.getOpenFileName(self, 'Open file', 'C:\Program files', 'Excel Spreadsheet files (*.xlsx, *.xls)')
        self.filename.setText(fname[0])

    def send(self):
        self.worker = WorkerThread()
        self.worker.start()
        #self.worker.worker_complete.connect(self.evt_worker_finished)
        self.worker.update_progress.connect(self.evt_update_progress)

    #def evt_worker_finished(self, emp):
    #    self.QMessageBox.information(self, "Done!", "Loading finished!\n\n{} {}".format(emp["fn"], emp["ln"]))

    def evt_update_progress(self, val):
        self.progressBar.setValue(val)
class WorkerThread(QThread):
    update_progress = pyqtSignal(int)
    worker_complete = pyqtSignal(dict)

    def run(self):
        start_time = time.time()
        file_path = "import.xlsx"
        print(file_path)
        wb = load_workbook(file_path)
        sheetnames = wb.sheetnames
        for sheet in wb:
            print(sheet)
            number_rows = sheet.max_row
            print(number_rows)
            number_columns = sheet.max_column
            items_number = number_rows - 3
            ok = 0
            total = 0
            progress = 0
            total_success = 0
            for k in range(number_rows+1):
                if k > 3:
                    report = api_import(k, sheet, number_columns, items_number, ok, total, total_success)
                    total = report['total']
                    ok = report['ok']
                    total_success = report['total_success']
                    progress = round(total/items_number*100)
                    print("{}%".format(progress))
                    self.update_progress.emit(progress)
            self.worker_complete.emit({"emp_id": 1234, "fn": "XXX", "ln": "YYYY"})
            end_time = time.time() - start_time
            item_time = end_time/total
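Since nearly all of the time is spent waiting on API responses, one way to parallelise the per-row work (a minimal sketch, not from the original thread) is to hand the rows to a pool from inside the worker's run(). The import_row helper below is hypothetical: it assumes api_import has been refactored to take plain cell values and return a small result dict, so the rows are picklable and a ProcessPoolExecutor could later be swapped in for the ThreadPoolExecutor.

from concurrent.futures import ThreadPoolExecutor, as_completed

def import_row(row_values):
    # Hypothetical per-row worker: receives plain cell values, performs the
    # get_item_id / templ_attach / input_param calls and returns e.g.
    # {'ok': True, 'errors': 0}.
    ...

class WorkerThread(QThread):
    update_progress = pyqtSignal(int)

    def run(self):
        wb = load_workbook("import.xlsx")
        sheet = wb.active
        # read the cells up front so only plain lists are handed to the pool
        data_rows = [[c.value for c in row] for row in sheet.iter_rows(min_row=4)]
        done, items_number = 0, len(data_rows)
        # a thread pool is enough for I/O-bound API calls; a process pool only
        # pays off if the per-row work is CPU-bound
        with ThreadPoolExecutor(max_workers=8) as pool:
            futures = [pool.submit(import_row, row) for row in data_rows]
            for fut in as_completed(futures):
                result = fut.result()
                done += 1
                self.update_progress.emit(round(done / items_number * 100))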

Related

I want to create an endpoint in FastAPI from which I can download an Excel file

I have this endpoint, which basically calls the function that creates the Excel file:
@router.post("/users/export", response_model=schemas.Mesage, status_code=201)
async def get_all_users_from_dashboard(
    *,
    db: Session = Depends(deps.get_db),
    s3: BaseClient = Depends(s3_auth),
    background_tasks: BackgroundTasks,
    current_user: models.User = Depends(deps.get_current_active_dashboard)
) -> Any:
    """
    Dashboard (Update user status all users from dashboard)
    """
    background_tasks.add_task(send_report_user, current_user)
    response = message("THE REPORT WILL be delivered", 201, {}, {})
    return response
The function send_report_user is the one that collects the data from the database and then creates the Excel file from that data:
def send_report_user(current_user):
    s3: BaseClient = deps.s3_auth()
    if crud.user.is_only_superuser(user=current_user):
        query = (select(User).where(User.role_id != 4))
    elif current_user.department_id == 2:
        query = (select(User)).where(and_(User.role_id != 1, User.department_id != 1))
    else:
        query = (select(User)).where(User.department_id == current_user.department_id, User.role_id != 1, User.department_id != 1)
    users = query.order_by(User.last_name)
    users_enable = query.filter(User.is_active == True).order_by(User.last_name)
    users_disable = query.filter(User.is_active == False).order_by(User.last_name)
    save_path = 'C:\miProyecto'
    title_columns = ['ID', 'Apellido', 'Nombre', 'Departamento', 'Correo electrónico', 'Teléfono']
    excel_file_name = 'Nomina_AIG_' + datetime.datetime.now().date().strftime("%d-%m-%Y") + '.xlsx'  # payroll file name
    workbook = xlsxwriter.Workbook(excel_file_name)  # creates a workbook
    worksheet = workbook.add_worksheet(name='Usuarios activos')
    row = 1  # starts at row 1, column 0
    col = 0
    for id, name in enumerate(title_columns):
        worksheet.write(0, id, name)
        worksheet.set_column(0, id, 15)
    worksheet = workbook.add_worksheet(name='Usuarios inactivos')
    row = 1
    col = 0
    worksheet.write_row(0, 0, title_columns)
    for row_data in users_enable:
        data = [row, row_data.id, row_data.last_name, row_data.first_name, row_data.department_id, row_data.email,
                row_data.phone]
        worksheet.write_row(row, col, data)
        row += 1
    worksheet = workbook.add_worksheet(name='Usuarios totales')
    row = 1
    col = 0
    worksheet.write_row(0, 0, title_columns)
    for row_data in users:
        data = [row, row_data.id, row_data.last_name, row_data.first_name, row_data.department_id, row_data.email,
                row_data.phone]
        worksheet.write_row(row, col, data)
        row += 1
    workbook.close()
    entries = os.listdir('.')
    print(entries)
    #return create_response(data="hola")
The situation is that it isn't working, and I don't know what else I could change.
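For reference, a minimal sketch of an endpoint that actually returns the workbook as a download (this is not from the original thread): the background-task approach cannot return the file, because the response is sent before the task runs, so here the workbook is built in memory and streamed back. build_users_workbook is a hypothetical stand-in for the query and formatting logic of send_report_user.

import io
import xlsxwriter
from fastapi.responses import StreamingResponse

def build_users_workbook() -> io.BytesIO:
    # hypothetical helper: same queries/formatting as send_report_user,
    # but writing into a BytesIO buffer instead of a file on disk
    buffer = io.BytesIO()
    workbook = xlsxwriter.Workbook(buffer, {'in_memory': True})
    worksheet = workbook.add_worksheet('Usuarios activos')
    worksheet.write_row(0, 0, ['ID', 'Apellido', 'Nombre'])
    workbook.close()          # must be closed before the buffer is read
    buffer.seek(0)
    return buffer

@router.get("/users/export")
def export_users():
    buffer = build_users_workbook()
    headers = {"Content-Disposition": "attachment; filename=Nomina.xlsx"}
    return StreamingResponse(
        buffer,
        media_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        headers=headers,
    )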

QUERY_EXCEEDED_MAX_MATCHES_ALLOWED error on Kaltura API (Python)

I'm unable to generate all entries in Kaltura. An ApiException with the message "Unable to generate list. max matches value was reached" (Error: QUERY_EXCEEDED_MAX_MATCHES_ALLOWED) gets triggered.
I tried to work around the issue by setting my session privileges to disableentitlement:
class class_chk_integrity():
    client = None
    pagesize = 0

    def __init__(self, worker_num, progress):
        self.pagesize = 30
        self.worker_num = worker_num
        self.progress = progress
        config = KalturaConfiguration(2723521)
        config.serviceUrl = "https://www.kaltura.com/"
        self.client = KalturaClient(config)
        ks = self.client.session.start("KALTURA_ADMIN_SECRET",
                                       "email@email.com",
                                       KalturaPluginsCore.KalturaSessionType.ADMIN,
                                       "KALTURA_PARTNER_ID",
                                       432000,
                                       "disableentitlement")
        self.client.setKs(ks)
I also tried to filter based on the IDs, but I can't get filter.idNotIn to work properly:
def get_total_reg(self, cont, lastEntryIds, lastEntryCreatedAt):
    filter = KalturaPluginsCore.KalturaBaseEntryFilter()
    if lastEntryIds != "":
        filter.idNotIn = lastEntryIds
    filter.orderBy = KalturaBaseEntryOrderBy.CREATED_AT_DESC
    pager = KalturaPluginsCore.KalturaFilterPager()
    pageIndex = 1
    entriesGot = 0
    pager.pageSize = self.pagesize
    pager.setPageIndex = pageIndex
    result = self.client.baseEntry.list(filter, pager)
    totalCount = result.totalCount
    if totalCount > 10000:
        totalCount = 9970
    if totalCount <= 0:
        cont = False
    while entriesGot < totalCount:
        pager.pageSize = self.pagesize
        pageIndex += 1
        pager.pageIndex = pageIndex
        result = self.client.baseEntry.list(filter, pager)
        entriesGot += len(result.objects)
        for e in result.objects:
            if lastEntryIds == "":
                lastEntryIds.append(e.id)
            else:
                lastEntryIds.append(e.id)
            lastEntryCreatedAt = e.createdAt
    return result.totalCount, self.pagesize, cont, lastEntryIds, lastEntryCreatedAt
This is how I'm calling the functions:
if __name__ == '__main__':
    try:
        log = _ServiceUtils.log()
        log.setup('all', 'integrity')
        cont = True
        lastEntryIds = []
        lastEntryCreatedAt = 0
        while cont is True:
            kmc = class_chk_integrity(0, 0)
            kmc_total_reg, kmc_page_size, cont, lastEntryIds, lastEntryCreatedAt = kmc.get_total_reg(cont, lastEntryIds, lastEntryCreatedAt)
            interval = 10
            max_threads = math.ceil(kmc_total_reg / (interval * kmc_page_size))
            # max_threads = 1
            threads_list = []
            print('TOTAL REG : %s | PAGE_SIZE : %s | INTERVAL : %s | THREADS : %s' % (kmc_total_reg, kmc_page_size, interval, max_threads))
            progress = class_progress_thread(max_threads)
            for index in range(0, max_threads):
                page_ini = index * interval
                page_end = index * interval + interval
                progress.add_worker_progress(index, datetime.now())
                threads_list.append(threading.Thread(target=thread_chk_integrity, args=(index, log, index * interval + 1, index * interval + interval, progress)))
            threads_list.append(threading.Thread(target=thread_output_progress, args=(progress, max_threads)))
            for thread in threads_list:
                thread.start()
            for thread in threads_list:
                thread.join()
            while not progress.stop():
                time.sleep(30)
    except KeyboardInterrupt:
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
I'd appreciate any help with this.
Thank you for your attention.
if totalCount > 10000:
    totalCount = 9970
I'm curious to know why you are changing the totalCount this way.
Short answer - paging works as long as the result set is up to 10K.
To work around that, sort the result by creation date (as you did), and when you get to 10K, start with a new search where the created_at date in the filter is the last value you got in the previous search. Reset your paging of course.
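A rough sketch of that workaround (assuming the same client and filter classes as in the question; createdAtLessThanOrEqual is a standard field on KalturaBaseEntryFilter):

def iter_all_entries(client, page_size=500):
    filter = KalturaPluginsCore.KalturaBaseEntryFilter()
    filter.orderBy = KalturaBaseEntryOrderBy.CREATED_AT_DESC
    pager = KalturaPluginsCore.KalturaFilterPager()
    pager.pageSize = page_size
    while True:
        pager.pageIndex = 1
        got_in_window = 0
        last_created_at = None
        while True:
            result = client.baseEntry.list(filter, pager)
            if not result.objects:
                return
            for e in result.objects:
                yield e
                last_created_at = e.createdAt
            got_in_window += len(result.objects)
            if got_in_window >= 9500:      # stay safely under the 10K window
                break
            pager.pageIndex += 1
        # restart the search below the last creation date seen and reset paging;
        # entries sharing that exact timestamp may come back twice, so
        # de-duplicate on entry id if that matters
        filter.createdAtLessThanOrEqual = last_created_at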

Parsing XML and saving Large Data to Django DB

I have a Django function that takes in a Nessus file and parses the data before saving it to the database. My Nessus file typically has about 30k rows, and saving it to the database can take as much as 2 hours. I have tried using bulk_create, but it breaks the code. I am on Django 1.11; is there a way I can speed up these large inserts into the database (Postgres)?
Here is my code:
def process_nessus_file(*args, **kwargs):
    process_obj = kwargs.get('file')
    context = kwargs.get('context')
    request = kwargs.get('request')
    file_obj = process_obj.first()
    file_path = file_obj.file.path
    context = etree.iterparse(
        file_path,
        events=('end', ),
        tag="ReportHost"
    )
    total_issues = 0
    detected_issues = 0
    undetected_issues = 0
    already_exist_issue = 0
    low_risk_count = 0
    medium_risk_count = 0
    high_risk_count = 0
    critical_risk_count = 0
    low_new_issue = 0
    medium_new_issue = 0
    high_new_issue = 0
    critical_new_issue = 0
    vul_history = []
    for event, elem in context:
        first_identified = None
        last_seen = None
        host = elem.get('name')
        logger.info('Processing issue for host : {}'.format(host))
        for child in elem:
            if child.tag == "HostProperties":
                for host_prop_tags in child:
                    if host_prop_tags.attrib['name'] == "HOST_START":
                        first_identified = host_prop_tags.text
                    elif host_prop_tags.attrib['name'] == "HOST_END":
                        last_seen = host_prop_tags.text
            if child.tag == "ReportItem":
                main_tags = child.attrib
                child_tags = dict()
                for ch_tags in child:
                    if ch_tags.text:
                        tag_text = ch_tags.text.strip()
                    else:
                        tag_text = ch_tags.text
                    child_tags[ch_tags.tag] = tag_text
                if child_tags.get('solution') and \
                        child_tags.get('solution') in ['n/a', 'N/A']:
                    child_tags['solution'] = ''
                plugin_output = child_tags.get('plugin_output')
                pluginid = int(main_tags.get('pluginID'))
                if plugin_output and (pluginid == 10107):
                    if re.search(BANNER_PATTERN, plugin_output):
                        banner_pattern = plugin_output.replace("{}".format(BANNER_PATTERN), "")
                        banner = banner_pattern.strip()
                    else:
                        banner = ''
                else:
                    banner = ''
                risk = child_tags.get('risk_factor')
                synopsis = child_tags.get('synopsis')
                description = child_tags.get('description')
                solution = child_tags.get('solution')
                protocol = main_tags.get('protocol')
                port = main_tags.get('port')
                pluginname = main_tags.get('pluginName')
                svcname = main_tags.get('svc_type')
                try:
                    host_type = get_host_type(host)
                    user_host = check_host_exists(host, host_type)
                    if user_host and not NessusData.objects.filter(
                            plugin_id=int(pluginid), host=host,
                            port=int(port), name=pluginname
                            ).exists():
                        try:
                            host_link_obj = Host.objects.get(
                                host=host
                            )
                        except Host.MultipleObjectsReturned:
                            host_link_obj = Host.objects.filter(
                                host=host
                            ).first()
                        except Host.DoesNotExist:
                            host_link_obj = Host.objects.create(
                                host=host,
                                user_host=user_host
                            )
                        nessus_obj = NessusFile.objects.create(
                            user_host=user_host,
                            host_link=host_link_obj,
                            linked_file=file_obj,
                            plugin_id=int(pluginid),
                            risk=risk, host=host,
                            protocol=protocol, port=int(port),
                            banner=banner, name=pluginname,
                            svc_type=svcname,
                            description=description,
                            first_identified=first_identified,
                            last_seen=last_seen,
                            synopsis=synopsis,
                            plugin_output=plugin_output,
                            solution=solution
                        )
                        issue = "Issue with host {}, port {} and" \
                                " pluginID {} is added.".format(
                                    nessus_obj.host, nessus_obj.port,
                                    nessus_obj.plugin_id
                                )
                        NessusFileLog.objects.create(
                            linked_file=file_obj,
                            issue_type="new",
                            issue=issue
                        )
                        detected_issues = detected_issues + 1
                        if risk == 'Medium':
                            medium_new_issue = medium_new_issue + 1
                        elif risk == 'Low':
                            low_new_issue = low_new_issue + 1
                        elif risk == 'High':
                            high_new_issue = high_new_issue + 1
                        elif risk == 'Critical':
                            critical_new_issue = critical_new_issue + 1
                    else:
                        nessus_obj = NessusFile.objects.filter(
                            plugin_id=int(pluginid), host=host,
                            port=int(port), name=pluginname
                        ).first()
                        if nessus_obj and not nessus_obj.last_seen:
                            nessus_obj.last_seen = last_seen
                            nessus_obj.save()
                        issue = "Issue with host {}, port {} and" \
                                " pluginID {} already exists.".format(host, port, pluginid)
                        NessusFileLog.objects.create(
                            linked_file=file_obj,
                            issue_type="duplicate",
                            issue=issue
                        )
                        already_exist_issue = already_exist_issue + 1
                except Exception as e:
                    pass
                if risk == 'Medium':
                    medium_risk_count = medium_risk_count + 1
                elif risk == 'Low':
                    low_risk_count = low_risk_count + 1
                elif risk == 'High':
                    high_risk_count = high_risk_count + 1
                elif risk == 'Critical':
                    critical_risk_count = critical_risk_count + 1
                total_issues = total_issues + 1
        elem.clear()
        while elem.getprevious() is not None:
            del elem.getparent()[0]
I heard that using raw SQL queries will speed it up, but I can't wrap my head around the process.
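The thread contains no accepted fix, but a common pattern (a sketch, under the assumption that the per-row .exists() lookups can be replaced by one prefetch) is to build the NessusFile instances in memory and flush them with bulk_create in batches, which turns one INSERT per ReportItem into one round-trip per batch. Note that bulk_create skips save() and the pre/post-save signals, and the logging rows would need the same treatment.

from django.db import transaction

BATCH_SIZE = 1000
pending = []            # unsaved NessusFile instances

def flush(pending):
    # one INSERT round-trip per batch instead of one per ReportItem
    if pending:
        NessusFile.objects.bulk_create(pending, batch_size=BATCH_SIZE)
        del pending[:]

# prefetch the keys that already exist so no per-row .exists() query is needed
existing = set(NessusFile.objects.values_list('plugin_id', 'host', 'port', 'name'))

with transaction.atomic():
    for event, elem in context:
        ...  # same parsing as above, producing pluginid, host, port, etc.
        if (int(pluginid), host, int(port), pluginname) not in existing:
            pending.append(NessusFile(
                user_host=user_host, host_link=host_link_obj,
                linked_file=file_obj, plugin_id=int(pluginid),
                risk=risk, host=host, protocol=protocol, port=int(port),
                banner=banner, name=pluginname, svc_type=svcname,
                description=description, first_identified=first_identified,
                last_seen=last_seen, synopsis=synopsis,
                plugin_output=plugin_output, solution=solution,
            ))
            if len(pending) >= BATCH_SIZE:
                flush(pending)
    flush(pending)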

Python scrapy multiple crawlers

I have problems with Scrapy running multiple crawlers. What I want to achieve:
I have an engine running in the background taking tasks/jobs from a MySQL database. The MySQL DB is queried every 15 seconds; if there is a new job, Scrapy should process it.
My setup is working fine so far. The last problem is that my spiders (Scrapy spiders) get "stacked" in the CrawlerRunner.
Start:
def schedule():
    jobs = GetJob.Job()
    jobs.getJobs()

if __name__ == "__main__":
    t = task.LoopingCall(schedule)
    t.start(15)
    reactor.run()
After GetJob.Job(), the jobs are processed here:
class ProcessJob():
    def processJob(self, job):
        # update job
        mysql = MysqlConnector.Mysql()
        db = mysql.getConnection()
        cur = db.cursor()
        job.status = 1
        update = "UPDATE job SET status=1 WHERE id=" + str(job.id)
        cur.execute(update)
        db.commit()
        db.close()

        # Start new crawler
        webspider = MySpider.MySpider(job)

        # Some settings
        ajaxSettings = CrawlerSettings.ajax_settings
        normalSettings = CrawlerSettings.normal_settings
        configure_logging()
        if job.max_pages != 0:
            ajaxSettings["CLOSESPIDER_PAGECOUNT"] = 0
            ajaxSettings["CLOSESPIDER_ITEMCOUNT"] = job.max_pages
            normalSettings["CLOSESPIDER_PAGECOUNT"] = 0
            normalSettings["CLOSESPIDER_ITEMCOUNT"] = job.max_pages

        # max connections
        concurrent_requests = int(job.max_pages / 20)
        if concurrent_requests < 1:
            concurrent_requests = 10
        if concurrent_requests > 500:
            concurrent_requests = 500
        ajaxSettings["CONCURRENT_REQUESTS"] = concurrent_requests
        normalSettings["CONCURRENT_REQUESTS"] = concurrent_requests

        # Ajax true or false
        if job.ajax == 1:
            runner = CrawlerRunner(ajaxSettings)
        else:
            runner = CrawlerRunner(normalSettings)
        d = runner.crawl(webspider, job=job)
And here is my spider:
class MySpider(CrawlSpider):
    def __init__(self, job):
        # Get the hosts
        self.job = job
        dispatcher.connect(self.spider_closed, signals.spider_closed)
        allowedDomainsPre = job.url.split(",")
        allowedDomains = []
        for domains in allowedDomainsPre:
            parsed_uri = urlparse(domains)
            domain = '{uri.netloc}'.format(uri=parsed_uri)
            print domain
            allowedDomains.append(domain)
        self.allowed_domains = allowedDomains
        self.start_urls = allowedDomainsPre

        # Get job patterns
        jobPatterns = job.processing_patterns.split(",")
        allowedPatterns = []
        deniedPatterns = []
        for pattern in jobPatterns:
            if '-' in pattern:
                deniedPatterns.append(pattern.replace("-", ""))
            else:
                allowedPatterns.append(pattern)
        self._rules = [
            Rule(LinkExtractor(allow=(allowedPatterns), deny=(deniedPatterns)), callback=self.parse_items, follow=True)
        ]
        self.name = job.id
        self.settings = CrawlerSettings.normal_settings

    def spider_closed(self, spider):
        stats = spider.crawler.stats.get_stats()
        itemCount = 0
        try:
            itemCount = stats["item_scraped_count"]
        except:
            print "Item count = zero"
        DoneJob.DoneJob().jobDone(self.job, itemCount)

    def parse_items(self, response):
        item = Item()
        # if the user wants a minimum description
        if self.job.min_description > 0:
            item['html'] = response.body
            item['url'] = response.url
            item['job_id'] = self.job.id
            soup = BeautifulSoup(response.body, 'html.parser')
            article = Document(soup.prettify()).summary()
            article_soup = BeautifulSoup(article)
            text = re.sub(' +', ' ', article_soup.get_text().rstrip())
            text_length = len(text.split(' '))
            if text_length > self.job.min_description:
                return item
        else:
            item['html'] = response.body
            item['url'] = response.url
            item['job'] = {}
            # Job
            item['job']['id'] = self.job.id
            item['job']['user_id'] = self.job.user_id
            item['job']['name'] = self.job.name
            item['job']['url'] = self.job.url
            item['job']['api'] = self.job.api
            item['job']['max_pages'] = self.job.max_pages
            item['job']['crawl_depth'] = self.job.crawl_depth
            item['job']['processing_patterns'] = self.job.processing_patterns
            item['job']['days'] = self.job.days
            item['job']['ajax'] = self.job.ajax
            item['job']['min_description'] = self.job.min_description
            return item
So after running two or three jobs, my spider_closed gets called multiple times instead of the expected once.
What is wrong here?
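The thread contains no answer, but one likely culprit (an assumption, not a confirmed fix) is the dispatcher.connect call in __init__: it subscribes every new spider instance to the global spider_closed signal, so each instance's handler fires whenever any of the stacked spiders closes. Scrapy's documented pattern is to connect the handler through the owning crawler's signal manager in from_crawler, roughly like this:

from scrapy import signals
from scrapy.spiders import CrawlSpider

class MySpider(CrawlSpider):

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super(MySpider, cls).from_crawler(crawler, *args, **kwargs)
        # connect through this crawler's signal manager instead of the global
        # dispatcher, so the handler only fires when *this* spider closes
        crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
        return spider

    def spider_closed(self, spider):
        stats = spider.crawler.stats.get_stats()
        DoneJob.DoneJob().jobDone(self.job, stats.get("item_scraped_count", 0))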

API Exporting Issue

I need someone's expertise on this exporting problem of mine.
How it works: Select a camera (animated or not is optional) >> File >> Export Selection >> File Type : .chan (need to load this script as a plugin)
Here's where the problem starts: it is able to create the text file; however, it is not 'exporting' or writing the contents into the file, and the file size is zero bytes.
I am making use of the API code as it has been written, modifying it to add in some maya.cmds calls.
Can someone kindly help me out?
import math, sys, string, os
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
import maya.OpenMayaAnim as OpenMayaAnim
import maya.cmds as cmds
import maya.mel as mel

kPluginTranslatorTypeName = "chan Export/Import"
kVersionNumber = "0.5a"
camSel = []
win_name = "chan_window"

class CustomNodeTranslator(OpenMayaMPx.MPxFileTranslator):
    def __init__(self):
        OpenMayaMPx.MPxFileTranslator.__init__(self)

    def haveWriteMethod(self):
        return True

    def haveReadMethod(self):
        return True

    def filter(self):
        return " .chan"

    def defaultExtension(self):
        return "chan"

    def writer(self, fileObject, optionString, accessMode):
        try:
            fullName = fileObject.fullName()
            fileHandle = open(fullName, "w")
            selectList = OpenMaya.MSelectionList()
            OpenMaya.MGlobal.getActiveSelectionList(selectList)
            node = OpenMaya.MObject()
            depFn = OpenMaya.MFnDependencyNode()
            path = OpenMaya.MDagPath()
            iterator = OpenMaya.MItSelectionList(selectList)
            animationTime = OpenMayaAnim.MAnimControl()
            maxTime = int(animationTime.maxTime().value())
            minTime = int(animationTime.minTime().value())
            while (iterator.isDone() == 0):
                iterator.getDependNode(node)
                depFn.setObject(node)
                iterator.getDagPath(path, node)
                cameraObject = OpenMaya.MFnCamera(path)
                transform = OpenMaya.MFnTransform(path)
                chanMe = fileExporter(transform, minTime, maxTime, cameraObject)
                for all in chanMe():
                    fileHandle.write(all)
                iterator.next()
            fileHandle.close()
        except:
            sys.stderr.write("Failed to write file information\n")
            raise

    def processLine(self, lineStr):
        self.importTheChan.writeFrameData(lineStr)
class fileExporter():
    """ module for exporting chan files from application. arguments: object, startFrame, endFrame """
    def __init__(self, transform, startAnimation, endAnimation, cameraObj):
        self.fileExport = []
        self.transform = transform
        self.cameraObj = cameraObj
        self.start = startAnimation
        self.end = endAnimation
        self.exportWin()

    def exportWin(self):
        self.expWindow = cmds.window(w=150, h=100, title="Export Selection")
        cmds.columnLayout(adjustableColumn=True)
        form = cmds.formLayout(numberOfDivisions=100)
        cmds.radioCollection()
        self.chk1 = cmds.radioButton(label='option1', onc=self.opt1On, ofc=self.opt1Off)
        self.chk2 = cmds.radioButton(label='option2', onc=self.opt2On, ofc=self.opt2Off)
        self.okayBtn = cmds.button(label='okay!', command=self.runSel, width=150, height=35)
        cmds.formLayout(form, edit=True, attachForm=[
            (self.chk1, 'top', 15),
            (self.chk1, 'left', 15),
            (self.chk2, 'top', 30),
            (self.chk2, 'left', 15),
            (self.okayBtn, 'top', 50),
            (self.okayBtn, 'left', 15)])
        cmds.showWindow(self.expWindow)

    def opt1On(self, args):
        print "User checked option1"
        startAnimation = cmds.playbackOptions(query=True, minTime=True)
        endAnimation = cmds.playbackOptions(query=True, maxTime=True)
        self.start = startAnimation
        self.end = endAnimation

    def opt1Off(self, args):
        print "User un-checked option1"
        cmds.radioButton(self.chk2, edit=True, enable=True)
        self.start = ""
        self.end = ""

    def opt2On(self, args):
        print "User checked option2"
        startAnimation = cmds.findKeyframe(which='first')
        endAnimation = cmds.findKeyframe(which='last')
        self.start = startAnimation
        self.end = endAnimation
        #self.start.append(int(startAnimation))
        #self.end.append(int(endAnimation))

    def opt2Off(self, args):
        print "User un-checked option2"
        self.start = ""
        self.end = ""

    def runSel(self, args):
        chkVal1 = cmds.radioButton(self.chk1, query=True, sl=1)
        chkVal2 = cmds.radioButton(self.chk2, query=True, sl=1)
        if chkVal1 == 1:
            print "opt1 Pressed!"
            print self.start
            print self.end
            self.test()
            self.closeWindow()
        elif chkVal2 == 1:
            print "opt2 Pressed!"
            print self.start
            print self.end
            self.test()
            self.closeWindow()
        else:
            cmds.warning("Check an option")

    def closeWindow(self):
        cmds.deleteUI(self.expWindow, window=True)

    def test(self):
        self.actualExp(self.transform, self.start, self.end, self.cameraObj)

    def actualExp(self, transform, startAnimation, endAnimation, cameraObj):
        mayaGlobal = OpenMaya.MGlobal()
        mayaGlobal.viewFrame(OpenMaya.MTime(1))
        # Converts the float argument into integer
        for i in range(int(startAnimation), int(endAnimation + 1)):
            focalLength = cameraObj.focalLength()
            vFilmApp = cameraObj.verticalFilmAperture()
            focalOut = 2 * math.degrees(math.atan(vFilmApp * 25.4 / (2 * focalLength)))
            myEuler = OpenMaya.MEulerRotation()
            spc = OpenMaya.MSpace.kWorld
            trans = transform.getTranslation(spc)
            rotation = transform.getRotation(myEuler)
            rotVector = OpenMaya.MVector(myEuler.asVector())
            self.fileExport.append((str(i) + '\t' + str(trans[0]) + "\t" + str(trans[1]) + "\t" + str(trans[2]) + "\t" + str(math.degrees(rotVector[0])) + "\t" + str(math.degrees(rotVector[1])) + "\t" + str(math.degrees(rotVector[2])) + "\t" + str(focalOut) + "\n"))
            mayaGlobal.viewFrame(OpenMaya.MTime(i+1))

    def __call__(self, args):
        return self.fileExport

    def radianToDegree(self, radians):
        outDegrees = 0.0
        outDegrees = (float(radians) / (math.pi)) * 180
        return outDegrees
# creator
def translatorCreator():
    return OpenMayaMPx.asMPxPtr(CustomNodeTranslator())

# initialize the script plug-in
def initializePlugin(mobject):
    mplugin = OpenMayaMPx.MFnPlugin(mobject)
    try:
        mplugin.registerFileTranslator(kPluginTranslatorTypeName, None, translatorCreator)
    except:
        sys.stderr.write("Failed to register translator: %s" % kPluginTranslatorTypeName)
        raise

# uninitialize the script plug-in
def uninitializePlugin(mobject):
    mplugin = OpenMayaMPx.MFnPlugin(mobject)
    try:
        mplugin.deregisterFileTranslator(kPluginTranslatorTypeName)
    except:
        sys.stderr.write("Failed to deregister translator: %s" % kPluginTranslatorTypeName)
        raise
The __call__ method is what's supposed to provide the contents of the file. It returns self.fileExport, which is an empty list that is not getting populated.
The problem here is that the writer method of the plugin will not wait for your exportWin UI to return the user inputs when you call
chanMe = fileExporter(transform, minTime, maxTime, cameraObject)
By the time the user has entered the inputs, the statements that follow have already been executed:
for all in chanMe():
    fileHandle.write(all)
iterator.next()
fileHandle.close()
That is why plugin-based file exporters like these have their options UI tucked away in the option box. Those options are passed in prior to the call to the plugin's writer().
You will need to export your options UI code (in a certain specific format) using MEL in another script file and specify the name of that file in the optionsScriptName param of the registerFileTranslator call. There is a communication protocol that needs to be followed for communication between this options UI and the writer plugin itself. RobTheBloke's awesome post illustrates this process.
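As a rough sketch of what that registration can look like (the options-script name and default option string below are made up; the MEL procedure itself, with the required signature, still has to be written as described in that post):

def initializePlugin(mobject):
    mplugin = OpenMayaMPx.MFnPlugin(mobject)
    mplugin.registerFileTranslator(
        kPluginTranslatorTypeName,   # translator name shown in the export dialog
        None,                        # no pixmap/icon
        translatorCreator,           # factory returning the MPxFileTranslator
        "chanTranslatorOptions",     # hypothetical MEL options script (option box UI)
        "range=playback"             # made-up default option string
    )

writer() then receives whatever the option box set through its optionString argument, so no window has to be shown at export time.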
Ideally, the writer() method should have all the details it needs for computing and exporting without having to wait for user input.
Alternatively, if you prefer to have your own UI window and more control over the flow of things, you could write the exporter not as a plugin, but as a simple MEL/Python module. You could still use the power of the API.
Hope this helped!
