problem while creating a filepath using variables - python

I want to create a file path using a variable. I want to do it like this:
import os
import gettitle as gt
def convert_file(
oldfilelocation = 'C:/Muziek' + (gt.finalyoutubetitle) + '.m4a'
newfilefilelocation = 'C:/Muziek' + (gt.finalyoutubetitle) + '.mmp3'
os.rename(r(oldfilelocation),r(newfilelocation))
)
exit()
But if I do this I get a lot of errors, shown below.

The correct syntax is
2 method parameters oldfilelocation and newfilelocation, with default value
1 statement
def convert_file(oldfilelocation='C:/Muziek' + (gt.finalyoutubetitle) + '.m4a',
                 newfilelocation='C:/Muziek' + (gt.finalyoutubetitle) + '.mmp3'):
    # Rename the downloaded .m4a file to the target name.
    # NOTE(review): default values are evaluated once, at def time, so
    # gt.finalyoutubetitle must already be set when this module loads —
    # confirm that is intended.
    # NOTE(review): 'C:/Muziek' + title concatenates with no '/' separator,
    # and '.mmp3' looks like a typo for '.mp3' — verify both against callers.
    os.rename(oldfilelocation, newfilelocation)

Related

is there an easy way to add a progress bar/counter where I can add a line to increment it every so often - Not Timed

I have a script that is basically complete. I'd like to add some sort of a progress bar to it, instead of printing out each step as it passes by
Is there anything that will let me do this?
setup a progress widget/counter/loop
give it a command function to increment
do some script
add the code to advance/increment the progress bar
do some more script
add the code to advance/increment the progress bar
do some more script
add the code to advance/increment the progress bar
do some more script
add the code to advance/increment the progress bar
also, can you please give me an example of some sort
I've looked at 3 or 4 different "progress bar" type libraries, and none give an example of doing it this way
all of the examples I seem to find want to do it by time or by byte size for downloading files
There are a number of progress-bar packages on PyPI; I recommend ezprogress if you run Python 3.
from ezprogress.progressbar import ProgressBar
import time

# Number of steps in your total script
steps_needed = 100
current_step = 0

# Set up the progress bar: one slot per step.
pb = ProgressBar(steps_needed, bar_length=100)
pb.start()

# Do what your script wants
...

# Increment the counter and repaint the bar.
current_step += 1
pb.update(current_step)

# Do what your script wants
...

# When you are done you can force the progress bar to finish.
# Bug fix: the instance is named `pb` — the original `PB.finished()`
# raised NameError because `PB` was never defined.
pb.finished()
The progress bar did not support turning off time estimation, however it is now possible in the newest version, just upgrade from PIP.
To turn off time estimation the progress bar just needs to be started with the parameter no_time=True like in the code below:
pb = ProgressBar(steps_needed, bar_length=100, no_time=True)
create your progressbar.py module
import sys
import copy

# Module-level progress state shared by ProgressIndicator instances.
currentProgressCnt = 0   # number of tasks completed so far
progressCntMax = 0       # total number of tasks
progressBarWidth = 50    # bar width in chars
scaleFctr = 0.0          # bar chars per task
tasksToDo = []           # task names, indexed by completion order

class ProgressIndicator:
    """Render a one-line textual progress bar on stdout.

    Usage: call setTaskList() once, then incProgress() followed by
    showProgress() after each completed task.
    """

    def showProgress(self):
        """Redraw the bar line reflecting the current counter.

        Bug fixes vs. the original:
        * under Python 3, true division makes scaleFctr a float, so the
          original `count * '#' * scaleFctr` raised TypeError (a str can
          only be repeated by an int) — the scaled lengths are now
          truncated to int before repeating the characters;
        * the trailing bare `print` was a Python 2 statement; in Python 3
          it is a no-op expression, so the final newline was never emitted.
        """
        progressChar = '#'
        fillChar = '.'
        doneLen = int(currentProgressCnt * scaleFctr)
        barDone = progressChar * doneLen
        # Keep the total bar width constant regardless of rounding.
        barRemain = fillChar * (int(progressCntMax * scaleFctr) - doneLen)
        percent = str(int((float(currentProgressCnt) / float(progressCntMax)) * 100)) + " % completed "
        taskId = '(' + tasksToDo[currentProgressCnt - 1] + ') '
        quote = str(currentProgressCnt) + '/' + str(progressCntMax) + ' '
        # '\r' rewinds to column 0 so the line is repainted in place.
        sys.stdout.write('\r' + barDone + barRemain + ' ' + percent + taskId + quote)
        sys.stdout.flush()
        if currentProgressCnt == progressCntMax:
            print()  # terminate the in-place line once everything is done

    def incProgress(self):
        """Advance the completed-task counter by one."""
        global currentProgressCnt
        currentProgressCnt += 1

    def setLastStep(self, size):
        """Set the total task count and derive the bar scale factor."""
        global progressCntMax, scaleFctr, currentProgressCnt
        progressCntMax = size
        currentProgressCnt = 0  # reset so an indicator can be reused
        # True division (the original floored to an int under Python 2,
        # distorting the bar for counts that don't divide the width);
        # guard against an empty task list instead of ZeroDivisionError.
        scaleFctr = progressBarWidth / progressCntMax if progressCntMax else 0.0

    def setTaskList(self, taskList):
        """Remember the task names and size the bar accordingly."""
        global tasksToDo
        tasksToDo = copy.copy(taskList)
        self.setLastStep(len(tasksToDo))
in main, use the ProgressIndicator class like this:
from progressbar import ProgressIndicator
import time
import datetime
#########################################
### MAIN ###
### SIMULATION ###
#########################################
# The procedure list this run has to execute, in order.
toDoList = ['proc1', 'proc2', 'proc3', 'proc1', 'proc4', 'proc5',
            'proc6', 'proc7', 'proc21', 'proc32', 'proc43', 'proc51',
            'proc4', 'proc65', 'proc76', 'proc87']

# Create the indicator and size it to the task list.
progressLine = ProgressIndicator()
progressLine.setTaskList(toDoList)

# Log the start.
startTime = str(datetime.datetime.now())
print ( startTime + " main started")

# Run every task, ticking the indicator after each one.
for task in toDoList:
    time.sleep(1)                # simulating the task() run
    progressLine.incProgress()   # task done: advance the counter
    progressLine.showProgress()  # repaint the display

# Work is done; log the end.
endTime = str(datetime.datetime.now())
print ( endTime + " main finished")

Automatically refactor string concatenation to string substitution?

I'm faced with a really ugly code that is a code generator, that takes a configuration file and outputs C code.
It works, but the script is full of things like:
outstr = "if(" + mytype + " == " + otherType + "){\n"
outstr += " call_" + fun_for_type(mytype) + "();\n"
outstr += "}\n"
# Now imagine 1000 times more lines like the previous ones...
Is there a tool to automatically change code like that to something more palatable (partial changes are more than welcome)? Like:
outstr = """if ({type} == {otherType}) {
call_{fun_for_type}({type});
}
""".format(type=mytype, otherType=otherType, fun_for_type=(mytype))
If this would have been C then I would have abused of Coccinelle, but I don't know of similar tools for Python.
Thanks
You can use dictionaries :
datas = {"type":mytype, "otherType":otherType, "fun_for_type":(mytype)}
outstr = "if ({type} == {otherType}) {{\n\
call_{fun_for_type}({type});\n\
}}\n".format(**datas)

Simulate multiple users in Grinder

I was wondering if this is even possible. I just set up Grinder and ran some base test but what if I want to have each thread be a different user? I see this line of code in the file that is generated (I am not a python developer)-could I somehow pass the username/password as a variable?
# Expecting 302 'Found'
# Grinder/Jython recording: POST the Liferay login form. The portlet
# tokens (self.token_*) were captured by the TCPProxy recorder. The first
# NVPair tuple carries the form fields — '_58_login' / '_58_password' are
# the hard-coded credentials the question asks about parameterizing per
# thread; the second tuple carries the request headers.
result = request10501.POST('/site/home' +
'?p_p_id=' +
self.token_p_p_id +
'&p_p_lifecycle=' +
self.token_p_p_lifecycle +
'&p_p_state=' +
self.token_p_p_state +
'&p_p_mode=' +
self.token_p_p_mode +
'&p_p_col_id=' +
self.token_p_p_col_id +
'&p_p_col_count=' +
self.token_p_p_col_count +
'&_58_doActionAfterLogin=' +
self.token__58_doActionAfterLogin +
'&_58_struts_action=' +
self.token__58_struts_action +
'&saveLastPath=' +
self.token_saveLastPath,
( NVPair('_58_formDate', '1466168922083'),
NVPair('_58_login', 'user1'),
NVPair('_58_password', 'pass1'), ),
( NVPair('Content-Type', 'application/x-www-form-urlencoded'), ))
Thanks
So what I have done is maintain the users in a CSV file and read them into an array. Now, for example, if there are 3 threads you can use a combination of grinder.getRunNumber and grinder.getThreadNumber (check the exact API names) and extract the right record dynamically.
Move the user1 & pass1 to a global scope and perform all the logic there.
See the API Link

Unable to display all the information except for first selection

I am using the following code to process a list of images that is found in my scene, before the gathered information, namely the tifPath and texPath is used in another function.
However, for example, in my scene there are 3 textures, and hence I should be seeing 3 sets of tifPath and texPath, but I am only seeing 1 of them — whereas if I check surShaderOut or surShaderTex I am able to see all 3 textures' info.
For example, the 3 textures file path is as follows (in the surShaderTex): /user_data/testShader/textureTGA_01.tga, /user_data/testShader/textureTGA_02.tga, /user_data/testShader/textureTGA_03.tga
I guess what I am trying to say is that why in my for statement, it is able to print out all the 3 results and yet anything bypass that, it is only printing out a single result.
Any advice?
surShader = cmds.ls(type = 'surfaceShader')
for con in surShader:
surShaderOut = cmds.listConnections('%s.outColor' % con)
surShaderTex = cmds.getAttr("%s.fileTextureName" % surShaderOut[0])
path = os.path.dirname(surShaderTex)
f = surShaderTex.split("/")[-1]
tifName = os.path.splitext(f)[0] + ".tif"
texName = os.path.splitext(f)[0] + ".tex"
tifPath = os.path.join(path, tifName)
texPath = os.path.join(path, texName)
convertText(surShaderTex, tifPath, texPath)
Only two lines are part of your for loop. The rest only execute once.
So first this runs:
surShader = cmds.ls(type = 'surfaceShader')
for con in surShader:
surShaderOut = cmds.listConnections('%s.outColor' % con)
surShaderTex = cmds.getAttr("%s.fileTextureName" % surShaderOut[0])
Then after that loop, with only one surShader, one surShaderOut, and one surShaderTex, the following is executed once:
path = os.path.dirname(surShaderTex)
f = surShaderTex.split("/")[-1]
tifName = os.path.splitext(f)[0] + ".tif"
texName = os.path.splitext(f)[0] + ".tex"
tifPath = os.path.join(path, tifName)
texPath = os.path.join(path, texName)
Indent that the same as the lines above it, and it'll be run for each element of surShader instead of only once.

How do I get Boto to return EC2 instances - S3 works fine

I'm having some issues with the EC2 bit of Boto (Boto v2.8.0, Python v2.6.7).
The first command returns a list of S3 Buckets - all good! The second command to get a list of EC2 instances blows up with a 403 with "Query-string authentication requires the Signature, Expires and AWSAccessKeyId parameters"
s3_conn = S3Connection(AWSAccessKeyId, AWSSecretKey)
print s3_conn.get_all_buckets()
ec2_conn = EC2Connection(AWSAccessKeyId, AWSSecretKey)
print ec2_conn.get_all_instances()
Also, my credentials are all good (Full admin) - I tested them using the Ruby aws-sdk, both EC2 and S3 work fine.
I also noticed that the host attribute in the ec2_conn object is s3-eu-west-1.amazonaws.com, "s3"...? Surely thats wrong? I've tried retro fixing it to the correct endpoint but no luck.
Any help would be greatly appreciated.
Thanks
Here's some working code I use to list all my instances across potentially multiple regions.
Its doing a lot more than you need, but maybe you can pare it down to what you want.
#!/usr/bin/python
import boto
import boto.ec2
import sys
class ansi_color:
    """ANSI terminal escape codes used to colorize instance names."""
    red = '\033[31m'
    green = '\033[32m'
    reset = '\033[0m'
    grey = '\033[1;30m'

def name(i):
    """Return the instance's Name tag, padded/cut to 16 chars and colorized.

    Running instances render green, anything else red; instances with no
    Name tag show '???'.
    """
    label = i.tags.get('Name', '???')
    label = label.ljust(16)[:16]
    color = ansi_color.green if i.state == 'running' else ansi_color.red
    return color + label + ansi_color.reset
def pub_dns( i ):
    """Return the instance's public DNS name, right-justified to 43 columns."""
    return i.public_dns_name.rjust(43)

def pri_dns( i ):
    """Return the instance's private DNS name, right-justified to 43 columns."""
    return i.private_dns_name.rjust(43)

def print_instance( i ):
    """Print one formatted line: colorized name, public DNS, private DNS.

    Fixed: the original used the Python 2 `print` statement, which is a
    SyntaxError under Python 3; the print() call form works on both.
    """
    print(' ' + name(i) + '| ' + pub_dns(i) + ' ' + pri_dns(i))
# Region selection: CLI args, defaulting to us-east-1; "all" scans every region.
regions = sys.argv[1:]
if not regions:
    regions = ['us-east-1']
if len(regions) == 1 and regions[0] == "all":
    rr = boto.ec2.regions()
else:
    rr = [boto.ec2.get_region(x) for x in regions]

for reg in rr:
    # Fixed: the Python 2 `print` statements below were SyntaxErrors under
    # Python 3; the print() call form works on both.
    print("========")
    print(reg.name)
    print("========")
    conn = reg.connect()
    reservations = conn.get_all_instances()
    # get_all_instances() returns reservations; the instances hang off them.
    for r in reservations:
        # print(ansi_color.grey + str(r) + ansi_color.reset)
        for i in r.instances:
            print_instance(i)
There is the connect_to_region command:
# Open an EC2 connection pinned to a specific region (here eu-west-1)
# instead of the default endpoint, passing the credentials explicitly.
import boto.ec2
connection = boto.ec2.connect_to_region('eu-west-1', aws_access_key_id=AWSAccessKeyId,
aws_secret_access_key=AWSSecretKey)
The Boto tutorial gives another way. That method would basically work like this:
import boto.ec2
# Scan the region catalogue and connect to the first region whose name
# matches; `break` stops the search once a connection is made.
for region in boto.ec2.regions():
    if region.name == 'my-favorite-region':
        connection = region.connect()
        break
This has not been working on older versions of Boto.
Do you have your IAM credentials in order? The given access key should have rights for EC2. If you're not sure, you can add the policy AmazonEC2FullAccess to test, and later tune this down.

Categories