I am trying to write a script that tars a directory and scp's it to a server that holds lots of tar files. I am having trouble creating the tar of the directory; here is the complete script. Why is that happening?
Code:
#!/usr/bin/python
import json
from pprint import pprint
import subprocess
import os
from os.path import expanduser
import time
import os.path
import shutil
import tarfile
import smtplib
import zipfile
import glob
def checkFileDownload():
    os.system("scp ***#***.***.***.***:/var/log/apache2/access.log ~/pingMeServeraccess.log")

def sendNotificationText(server="smtp.gmail.com", userName="***#***.com", password="********", cellNumber="***********", testLink="Test"):
    server = smtplib.SMTP_SSL(server, ***)
    server.login(userName, password)
    server.sendmail(userName, cellNumber, testLink)

def sendTarFileToPingMeServer(locationOfTarFile="/home/autotest/tarPackage", nameOfTarFile=""):
    fullPathOfFile = nameOfTarFile
    scpCommand = "scp -r " + fullPathOfFile + " ***#***.***.***.***:/home/autotest/untethered/"
    try:
        os.popen(scpCommand)
        testLink = "\nhttp://***.***.***.***/" + nameOfTarFile.split('/')[-1]
        sendNotificationText(testLink=testLink)
    except:
        print "something went wrong"

def makeTarFile(sourceDir):
    if os.path.exists(expanduser("~/tarPackage")):
        shutil.rmtree(expanduser("~/tarPackage"))
    else:
        pass
    dstFolder = expanduser('~/tarPackage')
    crtDstFolder = 'mkdir -p ' + dstFolder
    os.system(crtDstFolder)
    archiveName = str(time.time()) + '.tar'
    print 'creating archive, ' + archiveName
    out = tarfile.open(expanduser('~/tarPackage/' + archiveName), mode='w')
    try:
        out.add(sourceDir)
        sendTarFileToPingMeServer(nameOfTarFile=archiveName)
    finally:
        out.close()
    checkFileDownload()

def getTest(userName):
    testLoc = check(userName)
    gitList = []; TestList = []; packageDir = "mkdir ~/testPackageDir"
    if os.path.exists(expanduser("~/testPackageDir")):
        shutil.rmtree(expanduser("~/testPackageDir"))
    else:
        pass
    originalDirectory = os.getcwd()
    gitrepo = ""
    for test, gitLink in testLoc.items():
        if gitLink not in gitList:
            gitRepo = expanduser("~/tempGit_" + str(time.time()))
            p = subprocess.Popen(["git", "clone", gitLink, gitRepo], stdout=subprocess.PIPE)
            out, err = p.communicate()
            gitList.append(gitLink)
            testLink = gitRepo + test
            if os.path.isfile(testLink):
                os.system(packageDir)
                relPath = test.rstrip(test.split('/')[-1])
                x = "mkdir -p ~/testPackageDir" + relPath
                os.system(x)
                y = "~/testPackageDir" + relPath
                cpTest = "cp " + testLink + " " + expanduser(y)
                os.system(cpTest)
        else:
            print "git link already cloned, skipping, checking for test cases."
            testLink = gitRepo + test
            if os.path.isfile(testLink):
                relPath = test.rstrip(test.split('/')[-1])
                x = "mkdir -p ~/testPackageDir" + relPath
                os.system(x)
                y = "~/testPackageDir" + relPath
                cpTest = "cp " + testLink + " " + expanduser(y)
                os.system(cpTest)
    makeTarFile(expanduser("~/testPackageDir"))
    os.system("cd ~; rm -rf tempGit_*;cd -; rm -rf ~/testPackageDir")

def check(userName):
    p = subprocess.Popen(["ls", "/var/www/tempdata/testexec"], stdout=subprocess.PIPE)
    out, err = p.communicate()
    out = out.split('\n')[:-1]
    for fileName in out:
        if userName in fileName:
            filePath = "/var/www/tempdata/testexec/" + fileName
            json_data = open(filePath)
            data = json.load(json_data)
            testLoc = searchForGitTest(data)
    curDict = os.popen("pwd")
    os.system("cd ~")
    return testLoc

def searchForGitTest(data):
    aux = {}; auxList = []
    for idx in range(len(data["rows"])):
        scriptPath = data["rows"][idx]["scriptPath"]
        gitPath = data["rows"][idx]["gitPath"]
        aux[scriptPath] = gitPath
    return aux

if __name__ == "__main__":
    getTest("user")
Attaching the run:
autotest#batman007:/var/www$ python testPackageUploader.py
remote: Counting objects: 38357, done
remote: Finding sources: 100% (38357/38357)
remote: Total 38357 (delta 15889), reused 36060 (delta 15889)
Receiving objects: 100% (38357/38357), 652.78 MiB | 17.08 MiB/s, done.
Resolving deltas: 100% (15889/15889), done.
git link already cloned, skipping, checking for test cases.
creating archive
1407871278.15.tar: No such file or directory
access.log 100% 21KB 21.3KB/s 00:00
/var/www
The problem in this script was that I was not closing the tar file before sending it to the server. One of my colleagues helped me figure this out.
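For reference, a minimal sketch of the corrected makeTarFile, closing the archive before handing it off (passing the archive's full path to sendTarFileToPingMeServer is my own assumption, since scp runs from a different working directory):
def makeTarFile(sourceDir):
    dstFolder = expanduser('~/tarPackage')
    if os.path.exists(dstFolder):
        shutil.rmtree(dstFolder)
    os.system('mkdir -p ' + dstFolder)
    archiveName = str(time.time()) + '.tar'
    archivePath = os.path.join(dstFolder, archiveName)
    print 'creating archive, ' + archiveName
    out = tarfile.open(archivePath, mode='w')
    try:
        out.add(sourceDir)
    finally:
        out.close()  # close (and flush) the archive before anything tries to copy it
    sendTarFileToPingMeServer(nameOfTarFile=archivePath)  # then send it
    checkFileDownload()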
Related
import pathlib
import subprocess
import argparse
import os
from _datetime import datetime

def get_unique_run_id():
    if os.environ.get("BUILD_NUMBER"):
        unique_run_id = os.environ.get("BUILD_NUMBER")
    elif os.environ.get("CUSTOM_BUILD_NUMBER"):
        unique_run_id = os.environ.get("CUSTOM_BUILD_NUMBER")
    else:
        unique_run_id = datetime.now().strftime('%Y%M%D%H%M%S')
    os.environ['UNIQUE_RUN_ID'] = unique_run_id
    return unique_run_id

def create_output_directory(prefix='results_'):
    global run_id
    if not run_id:
        raise Exception("Variable 'run_id' is not set. Unable to create output directory")
    curr_file_path = pathlib.Path(__file__).parent.absolute()
    dir_to_create = os.path.join(curr_file_path, prefix + str(run_id))
    os.mkdir(dir_to_create)
    print(f"Created output directory: {dir_to_create}")
    return dir_to_create

if __name__ == "__main__":
    run_id = get_unique_run_id()
    output_dir = create_output_directory()
    json_out_dir = os.path.join(output_dir, 'json_report_out.json')
    junit_out_dir = os.path.join(output_dir, 'junit_report_out')
    # import pdb; pdb.set_trace()
    parser = argparse.ArgumentParser()
    parser.add_argument('--test_directory', required=False, help='Specify the location of the test file')
    parser.add_argument('--behave_options', type=str, required=False, help='String of behave options')
    args = parser.parse_args()
    test_directory = '' if not args.test_directory else args.test_directory
    behave_options = '' if not args.behave_options else args.behave_options
    command = f'behave -k--no-capture -f json.pretty -o {json_out_dir} ' \
              f'--junit --junit-directory {junit_out_dir}' \
              f'{behave_options} ' \
              f'{test_directory}'
    print(f"Running command : {command}")
    rs = subprocess.run(command, shell=True)
When I try to run this I'm getting an error as follows:
FileNotFoundError: [WinError 3] The system cannot find the path specified: 'E:\Projects\results_20204710/11/20194751'. Please help me to find a solution for this.
I thought it could be an installer error, so I tried both the 32-bit and 64-bit Python installers. I'm totally lost here.
For a single directory:
os.mkdir(...)
For nested directories:
os.makedirs(...)
You can also check if a directory exists:
os.path.exists(...)
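Note also that the path in the traceback contains slashes ('results_20204710/11/20194751'), which points at the timestamp itself: in strftime, '%D' expands to MM/DD/YY and '%M' is minutes, not the month. A minimal sketch of the fallback I would try, assuming the rest of the script above stays the same:
from datetime import datetime
import os

# slash-free timestamp: year, month, day, hour, minute, second
unique_run_id = datetime.now().strftime('%Y%m%d%H%M%S')

dir_to_create = os.path.join(os.getcwd(), 'results_' + unique_run_id)
os.makedirs(dir_to_create, exist_ok=True)  # also creates any missing parent directories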
At this point the script works great for a single file. When a directory is given, it uses tar to create a single file, which works well; then the tar file is gpg-encrypted with the password provided. The gpg step also works. The problem is that when you decrypt the gpg file, the tar is corrupted every time. I'm trying to find what I'm doing wrong here. Please help.
#!/usr/bin/env python3
# Takes file in does symmetric encryption with the password you provide
# then adds it to a running IPFS(ipfs.io) instance.
#
import os
import argparse
import gnupg
import ipfsapi
import tarfile

# Parse command arguments
parser = argparse.ArgumentParser(description='Encrypt file/directory and add it to IPFS')
parser.add_argument('-i','--input', help='File.txt or Directory', required=True)
parser.add_argument('-p','--password', help='Password to encrypt with', required=True)
args = parser.parse_args()

# Set GPG Home directory
gpg = gnupg.GPG(homedir='')
# Set GPG Encoding
gpg.encoding = 'utf-8'
# Get dataToEncrypt full path
dataToEncrypt = (os.path.abspath(args.input))
# Setup tar filename to end with .zip
tarFile = ("{}.tar".format(dataToEncrypt))
# Setup encrypted filename to end with .gpg
encryptedFile = ("{}.tar.gpg".format(dataToEncrypt))
# Tell module where IPFS instance is located
api = ipfsapi.connect('127.0.0.1', 5001)

def dataTar():
    if os.path.isfile(dataToEncrypt):
        return
    else:
        #return
        with tarfile.open(tarFile, 'w|') as tar:
            tar.add(dataToEncrypt)
            tar.close()

def encryptFile():
    passphrase = (args.password)
    if os.path.isfile(dataToEncrypt):
        with open(dataToEncrypt, 'rb') as f:
            status = gpg.encrypt(f,
                                 encrypt=False,
                                 symmetric='AES256',
                                 passphrase=passphrase,
                                 armor=False,
                                 output=dataToEncrypt + ".gpg")
    else:
        with open(tarFile, 'rb') as f:
            status = gpg.encrypt(f,
                                 encrypt=False,
                                 symmetric='AES256',
                                 passphrase=passphrase,
                                 armor=False,
                                 output=dataToEncrypt + ".tar.gpg")
    print ('ok: ', status.ok)
    print ('status: ', status.status)
    print ('stderr: ', status.stderr)

def ipfsFile(encryptedFile):
    # Add encrypted file to IPFS
    ipfsLoadedFile = api.add(encryptedFile, wrap_with_directory=True)
    # Return Hash of new IPFS File
    fullHash = (ipfsLoadedFile[1])
    ipfsHash = fullHash['Hash']
    return(ipfsHash)

def delEncryptedFile(encryptedFile):
    try:
        os.remove(encryptedFile)
    except:
        print("Error: %s unable to find or delete file." % encryptedFile)

def main():
    dataTar()
    encryptFile()
    #ipfsFile(encryptedFile)
    #print ("File encrypted and added to IPFS with this hash " + ipfsFile(encryptedFile))
    #delEncryptedFile(encryptedFile)

if __name__ == "__main__":
    main()
The code looks fine. I just tried it with https://pypi.org/project/python-gnupg/ and it works. I had to fix the API calls according to that package, but I don't think that matters; just diff it to see the changes. I don't see any problem except that you should be decrypting with gpg -d file.tar.gpg | tar xvf -.
#!/usr/bin/env python3
# Takes file in does symmetric encryption with the password you provide then
# adds it to a running IPFS (ipfs.io) instance.
import os
import argparse
import gnupg
import tarfile

parser = argparse.ArgumentParser(
    description='Encrypt file/directory and add it to IPFS')
parser.add_argument('-i','--input',
                    help='File.txt or Directory',
                    required=True)
parser.add_argument('-p','--password',
                    help='Password to encrypt with',
                    required=True)
args = parser.parse_args()

gpg = gnupg.GPG()
gpg.encoding = 'utf-8'
dataToEncrypt = (os.path.abspath(args.input))
tarFile = ("{}.tar".format(dataToEncrypt))
encryptedFile = ("{}.tar.gpg".format(dataToEncrypt))

def dataTar():
    if os.path.isfile(dataToEncrypt):
        return
    else:
        with tarfile.open(tarFile, 'w|') as tar:
            tar.add(dataToEncrypt)
            tar.close()

def encryptFile():
    passphrase = (args.password)
    if os.path.isfile(dataToEncrypt):
        with open(dataToEncrypt, 'rb') as f:
            status = gpg.encrypt(f.read(),
                                 recipients=None,
                                 symmetric='AES256',
                                 passphrase=passphrase,
                                 armor=False,
                                 output=dataToEncrypt + ".gpg")
    else:
        with open(tarFile, 'rb') as f:
            status = gpg.encrypt(f.read(),
                                 recipients=None,
                                 symmetric='AES256',
                                 passphrase=passphrase,
                                 armor=False,
                                 output=dataToEncrypt + ".tar.gpg")
    print ('ok: ', status.ok)
    print ('status: ', status.status)
    print ('stderr: ', status.stderr)

def ipfsFile(encryptedFile):
    ipfsLoadedFile = api.add(encryptedFile, wrap_with_directory=True)
    fullHash = (ipfsLoadedFile[1])
    ipfsHash = fullHash['Hash']
    return(ipfsHash)

def delEncryptedFile(encryptedFile):
    try:
        os.remove(encryptedFile)
    except:
        print("Error: %s unable to find or delete file." % encryptedFile)

def main():
    dataTar()
    encryptFile()

if __name__ == "__main__":
    main()
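For completeness, a minimal sketch of decrypting the result back into a tar with the same python-gnupg package (the file name and passphrase are placeholders matching the script above):
import gnupg

gpg = gnupg.GPG()
with open('mydir.tar.gpg', 'rb') as f:
    # symmetric decryption only needs the passphrase; write the plaintext tar back out
    status = gpg.decrypt_file(f, passphrase='mypassword', output='mydir.tar')
print('ok:', status.ok)
# then unpack it, e.g.: tar xvf mydir.tar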
I'm trying to make a Python script (currently on Windows) which will open some sub-processes (which run indefinitely), and the script should periodically check whether all of the opened sub-processes are still working correctly. So it should be done with a while loop, I guess.
The sub-processes are about FFMPEG livestreaming.
The problem is that when I do time.sleep(n) in my loop, every FFMPEG livestream stops, so I suppose time.sleep affects all of the child subprocesses.
I have no idea how to make it work.
Here is my python code:
import os, time, sys, datetime, smtplib, configparser, logging, subprocess, psutil
import subprocess

def forwardudpstream(channel_number, ip_input, ip_output):
    try:
        ffmpeg_command = 'ffmpeg -i udp://' + ip_input + ' -vcodec copy -acodec copy -f mpegts "udp://' + ip_output + '?pkt_size=1316"'
        ffmpeg_output = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
        return str(ffmpeg_output.pid)
    except:
        print ("Exception!")
        return '0'

while True:
    configuration = 'config.ini'
    channel_list_file = 'CHANNEL_LIST.conf'
    pid_folder = "D:\\Forward_UDP_Stream\\pids\\"
    channel_list = [line.rstrip('\n') for line in open(channel_list_file)]
    for line in channel_list:
        if not line.startswith('#') and ('|' in line):
            channel_number, ip_input, ip_output = line.split('|')
            print('----------')
            print("Channel number = ", channel_number)
            print("IP Input = ", ip_input)
            print("IP Output = ", ip_output)
            pid_file_found = False
            print("Checking if pid file exists...")
            for pidfile in os.listdir(pid_folder):
                if pidfile.startswith(channel_number + '-'):
                    print("Pid file is found for this channel.")
                    pid_file_found = True
                    pid = int(pidfile.split('-')[1].split('.')[0])
                    print("PID = ", str(pid))
                    print("Checking if corresponding process is active...")
                    if not psutil.pid_exists(pid):
                        print("Process is not active.")
                        print("Removing old pid file.")
                        os.remove(pid_folder + pidfile)
                        print("Starting a new process...")
                        pid_filename = channel_number + '-' + forwardudpstream(channel_number, ip_input, ip_output) + '.pid'
                        pid_file = open(pid_folder + pid_filename, "a")
                        pid_file.write("Process is running.")
                        pid_file.close()
                    else:
                        print("Process is active!")
                    break
            if pid_file_found == False:
                print("Pid file is not found. Starting a new process and creating pid file...")
                pid_filename = channel_number + '-' + forwardudpstream(channel_number, ip_input, ip_output) + '.pid'
                pid_file = open(pid_folder + pid_filename, "a")
                pid_file.write("Process is running.")
                pid_file.close()
    time.sleep(10)
Here is my CHANNEL_LIST.conf file example:
1|239.1.1.1:10000|239.1.1.2:10000
2|239.1.1.3:10000|239.1.1.4:10000
Perhaps there is some other solution to make the waiting and the sub-processes work together. Does anyone have an idea?
UPDATE:
I finally made it work when I removed the stdout=subprocess.PIPE part from the subprocess command.
Now it looks like this:
ffmpeg_output = subprocess.Popen(ffmpeg_command, stderr=subprocess.STDOUT, shell=False)
So now I'm confused about why the previous command was causing a problem. Any explanation?
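A likely explanation (my guess, not confirmed in the thread): with stdout=subprocess.PIPE, ffmpeg's output goes into a pipe that nothing ever reads, so once the OS pipe buffer fills up, ffmpeg blocks on its next write and the stream appears to stop. If you do not need the output, discarding it avoids the problem, for example:
import subprocess

# inside forwardudpstream(), instead of stdout=subprocess.PIPE:
ffmpeg_output = subprocess.Popen(ffmpeg_command,
                                 stdout=subprocess.DEVNULL,  # discard output instead of filling an unread pipe
                                 stderr=subprocess.STDOUT,
                                 shell=False)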
If I run "python /home/pi/temp/getTemp.py" from the terminal command line I get
"Error, serial port '' does not exist!" If I cd to the temp directory and run "python getTemp.py" it runs fine. Can anyone tell me why?
#!/usr/bin/env python
import os
import sys
import socket
import datetime
import subprocess
import signal

port = "/dev/ttyUSB0"
tlog = '-o%R,%.4C'
hlog = '-HID:%R,H:%h'
clog = '-OSensor %s C: %.2C'

def logStuff(data):
    with open("/home/pi/temp/templog.txt", "a") as log_file:
        log_file.write(data + '\n')

def main():
    try:
        output = subprocess.check_output(['/usr/bin/digitemp_DS9097U', '-q', '-a'])
        for line in output.split('\n'):
            if len(line) == 0:
                logStuff("len line is 0")
                continue
            if 'Error' in line:
                logStuff("error in output")
                sys.exit()
            line = line.replace('"','')
            if line.count(',') == 1:
                (romid, temp) = line.split(',')
                poll = datetime.datetime.now().strftime("%I:%M:%S %p on %d-%B-%y")
                content = (romid + "," + poll + "," + temp)
                print content
                return content
    except subprocess.CalledProcessError, e:
        print "digitemp error:\n", e.output
    except Exception as e:
        logStuff('main() error: %s' % e)
        os.kill(os.getpid(), signal.SIGKILL)

if __name__ == "__main__":
    main()
It probably cannot find the configuration file, which is normally stored in ~/.digitemprc when you run it with -i to initialize the network. If it was created in a different directory, you need to tell digitemp where to find it every time by passing -c with the path to the config file.
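For example, a sketch of the call in getTemp.py with an explicit config path (the exact location is an assumption; point it at wherever -i created the file):
output = subprocess.check_output(['/usr/bin/digitemp_DS9097U',
                                  '-c', '/home/pi/temp/.digitemprc',  # explicit config, so the cwd no longer matters
                                  '-q', '-a'])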
The problem: I'm trying to dynamically build a Python user-data script for Amazon in a Jenkins deploy script and pass it to an ASG to be executed at runtime. I pass my vars to the deploy script and then dynamically create the Python script based on arguments.
I'm getting an unexpected string-replacement error and I'm not entirely sure why. handoff.sh is what passes the arguments from Jenkins to the deploy script.
the error:
[deploy-and-configure-test] $ /bin/sh -xe /tmp/hudson8978997207867591628.sh
+ sh /var/lib/jenkins/workspace/deploy-and-configure-test/handoff.sh
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/deploy-and-configure-test/asgBuilder.py", line 393, in <module>
''' % (str(repo), str(playbook),str(user_data_ins), str(in_user_data)))
TypeError: %u format: a number is required, not str
the dynamic portion of my deploy script:
in_user_data = args.in_user_data
playbook = args.playbook
repo = args.repo

user_data_ins = ('''export CLOUD_ENVIRONMENT=%s\n
export CLOUD_MONITOR_BUCKET=%s\n
export CLOUD_APP=%s\n
export CLOUD_STACK=%s\n
export CLOUD_CLUSTER=%s\n
export CLOUD_AUTO_SCALE_GROUP=%s\n
export CLOUD_LAUNCH_CONFIG=%s\n
export EC2_REGION=%s\n
export CLOUD_DEV_PHASE=%s\n
export CLOUD_REVISION=%s\n
export CLOUD_DOMAIN=%s\n
export SG_GROUP=%s\n''' % (cloud_environment,
                           cluster_monitor_bucket,
                           cluster_name,
                           cloud_stack,
                           cloud_cluster,
                           cloud_auto_scale_group,
                           cloud_launch_config,
                           provider_region,
                           cloud_dev_phase,
                           cloud_revision,
                           cloud_domain,
                           export_env_sg_name))

user_data_ins = ('''
#!/usr/bin/python
import os
import subprocess
import time
import uuid

def shell_command_execute(command):
    p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    (output, err) = p.communicate()
    print output
    return output

repo = "%s"
playbook = "%s"
echo_bash_profile = "echo %s >> ~/.bash_profile" % user_echo
shell_command_execute(echo_bash_profile)
var_user_data = "%s"
for varb in var_user_data.split('|'):
    echo_bash_profile_passed = "echo " + varb + " >> ~/.bash_profile"
    shell_command_execute(echo_bash_profile_passed)
command = 'git clone ' + repo
shell_command_execute(command)
folder = repo.split('/')[4].replace('.git','')
#https://github.com/test/test.git # replaced for security.
execute_playbook = ('ansible-playbook -i "localhost," -c local' + '/' + os.path.dirname(os.path.realpath(__file__)) + '/' + folder + '/' + playbook >> ansible.log)
print execute_playbook
shell_command_execute(execute_playbook)
''' % (str(repo), str(playbook),str(user_data_ins), str(in_user_data)))

text_file = open("user-data.py", "wa")
text_file.write(user_data_ins)
text_file.close()
lc_user_data = '${file("%s/user-data.py")}' %wd
Updated, still not working:
user_data_ins = ('''export CLOUD_ENVIRONMENT=%s\n
export CLOUD_MONITOR_BUCKET=%s\n
export CLOUD_APP=%s\n
export CLOUD_STACK=%s\n
export CLOUD_CLUSTER=%s\n
export CLOUD_AUTO_SCALE_GROUP=%s\n
export CLOUD_LAUNCH_CONFIG=%s\n
export EC2_REGION=%s\n
export CLOUD_DEV_PHASE=%s\n
export CLOUD_REVISION=%s\n
export CLOUD_DOMAIN=%s\n
export SG_GROUP=%s\n''' % (cloud_environment,
                           cluster_monitor_bucket,
                           cluster_name,
                           cloud_stack,
                           cloud_cluster,
                           cloud_auto_scale_group,
                           cloud_launch_config,
                           provider_region,
                           cloud_dev_phase,
                           cloud_revision,
                           cloud_domain,
                           export_env_sg_name))

user_data_ins = ('''
#!/usr/bin/python
import os
import subprocess
import time
import uuid

def shell_command_execute(command):
    p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    (output, err) = p.communicate()
    print output
    return output

repo = "%s"
playbook = "%s"
echo_bash_profile = "echo %s >> ~/.bash_profile" % user_echo
shell_command_execute(echo_bash_profile)
var_user_data = "%s"
for varb in var_user_data.split('|'):
    echo_bash_profile_passed = "echo " + varb + " >> ~/.bash_profile"
    shell_command_execute(echo_bash_profile_passed)
command = 'git clone ' + repo
shell_command_execute(command)
folder = repo.split('/')[4].replace('.git','')
#https://github.com/zukeru/vision_provis.git
execute_playbook = ('ansible-playbook -i "localhost," -c local' + '/' + os.path.dirname(os.path.realpath(__file__)) + '/' + folder + '/' + playbook >> ansible.log)
print execute_playbook
shell_command_execute(execute_playbook)
''' % (str(repo), str(playbook),str(user_data_ins), str(in_user_data)))

text_file = open("user-data.py", "wa")
text_file.write(user_data_ins)
text_file.close()
lc_user_data = '${file("%s/user-data.py")}' %wd
@Grant Zukel, I would recommend doing the following.
Change the last line to:
'''.format(str(repo), str(playbook), str(user_data_ins), str(in_user_data)))
And in your template, change the first %s to {0} (which would be str(repo)) and every subsequent one to {1}, {2}, etc.
The problem is that you have string replacement inside the string being replaced.
Whenever that happens, you need to double the literal percent signs so the outer formatting leaves them alone:
echo_bash_profile = "echo %%s >> ~/.bash_profile" %% user_echo
It is this line that is causing the error:
echo_bash_profile = "echo %s >> ~/.bash_profile" % user_echo
I would recommend using the string .format method if you are using Python 2.6 or higher.
Try this:
user_data_ins = ('''
#!/usr/bin/python
import os
import subprocess
import time
import uuid

def shell_command_execute(command):
    p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    (output, err) = p.communicate()
    print output
    return output

repo = "{0}"
playbook = "{1}"
echo_bash_profile = "echo {2} >> ~/.bash_profile" % user_echo
shell_command_execute(echo_bash_profile)
var_user_data = "{3}"
for varb in var_user_data.split('|'):
    echo_bash_profile_passed = "echo " + varb + " >> ~/.bash_profile"
    shell_command_execute(echo_bash_profile_passed)
command = 'git clone ' + repo
shell_command_execute(command)
folder = repo.split('/')[4].replace('.git','')
#https://github.com/zukeru/vision_provis.git
execute_playbook = ('ansible-playbook -i "localhost," -c local' + '/' + os.path.dirname(os.path.realpath(__file__)) + '/' + folder + '/' + playbook >> ansible.log)
print execute_playbook
shell_command_execute(execute_playbook)
'''.format(str(repo), str(playbook),str(user_data_ins), str(in_user_data)))
This line seems to be causing the issue:
echo_bash_profile = "echo %s >> ~/.bash_profile" % user_echo
Likely it sees the % user as %u.
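A small self-contained demo of why the %-formatting chokes here, and how doubling the percent signs keeps the line literal (the repo URL is made up for illustration):
# During the outer %-formatting, the literal '% user_echo' is read as a '% u' conversion,
# which demands a number -> "TypeError: %u format: a number is required, not str".
# Doubling every literal percent sign escapes it, so only the intended %s is filled in.
template = '''repo = "%s"
echo_bash_profile = "echo %%s >> ~/.bash_profile" %% user_echo'''
print(template % ("https://github.com/example/example.git",))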
OK, so @FirebladDan, you were right, I missed one. Here is the working code:
user_data_ins = ('''
#!/usr/bin/python
import os
import subprocess
import time
import uuid

def shell_command_execute(command):
    p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    (output, err) = p.communicate()
    print output
    return output

repo = "%s"
playbook = "%s"
echo_bash_profile = "echo " + %s + " >> ~/.bash_profile"
shell_command_execute(echo_bash_profile)
var_user_data = "%s"
for varb in var_user_data.split('|'):
    echo_bash_profile_passed = "echo " + varb + " >> ~/.bash_profile"
    shell_command_execute(echo_bash_profile_passed)
command = 'git clone ' + repo
shell_command_execute(command)
folder = repo.split('/')[4].replace('.git','')
#https://github.com/zukeru/vision_provis.git
execute_playbook = ('ansible-playbook -i "localhost," -c local' + '/' + os.path.dirname(os.path.realpath(__file__)) + '/' + folder + '/' + playbook >> ansible.log)
print execute_playbook
shell_command_execute(execute_playbook)
''' % (str(repo), str(playbook),str(user_data_ins), str(in_user_data)))

text_file = open("user-data.py", "wa")
text_file.write(user_data_ins)
text_file.close()
lc_user_data = '${file("%s/user-data.py")}' %wd