I have a problem. I run my tests with pytest.
At the start, the test calls a method that asks me to enter the address of the database I am testing against. However, I get this error:
element = "http://" + sys.stdin.readline()

../../python/lib/python3.6/site-packages/_pytest/capture.py:702: in read
    raise IOError("reading from stdin while output is captured")
E   OSError: reading from stdin while output is captured
Below is my code:
@staticmethod
def setAddress():
    print("Give database:")
    element = "http://" + sys.stdin.readline()
    return element
I need to prefix the address with http. How can I change my code? Thanks for the help!
Set an environment variable when running tests in your shell:
DB_URL=http://xxx pytest
and then retrieve it in your tests:
import os
…
db_url = os.getenv('DB_URL')
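For example, the method from the question could then be rewritten along these lines (a minimal sketch; the surrounding class name is hypothetical, and DB_URL is simply the environment variable set above):

import os

class DatabaseConfig:
    @staticmethod
    def setAddress():
        # Read the address from the environment instead of stdin, so that
        # pytest's stdin/output capturing is never involved.
        return "http://" + os.environ["DB_URL"]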
I am trying to create a secure (e.g., SSL/HTTPS) XML-RPC client/server. The client-server part works perfectly when the required certificates are present on my system; however, when I try to create the certificates during execution, I receive a FileNotFoundError when opening the ssl-wrapped socket, even though the certificates are clearly present (because the preceding function created them).
Why is the FileNotFoundError given when the files are present? (If I simply close and restart the python script no error is produced when opening the socket and everything works with no issue whatsoever.)
I've searched elsewhere for solutions, but the best/closest answer I've found is, perhaps, "race conditions" between creating the certificates and opening them. However, I've tried adding "sleep" to alleviate the possibility of race conditions (as well as running each function individually via a user input menu) with the same error every time.
What am I missing?
Here is a snippet of my code:
import os
import time
import threading
import ssl
from xmlrpc.server import SimpleXMLRPCServer
import certs.gencert as gencert  # <---- My python module for generating certs
...
rootDomain = "mydomain"
CERTFILE = "certs/mydomain.cert"
KEYFILE = "certs/mydomain.key"
...

def listenNow(ipAdd, portNum, serverCert, serverKey):
    # Create XMLRPC Server, based on ipAdd/port received
    server = SimpleXMLRPCServer((ipAdd, portNum))

    # **THIS** is what causes the FileNotFoundError ONLY if
    # the certificates are created during THE SAME execution
    # of the program.
    server.socket = ssl.wrap_socket(server.socket,
                                    certfile=serverCert,
                                    keyfile=serverKey,
                                    do_handshake_on_connect=True,
                                    server_side=True)
    ...

    # Start server listening [forever]
    server.serve_forever()

...

# Verify certificates are present; if not present,
# create new certificates
def verifyCerts():
    # If cert or key file not present, create new certs
    if not os.path.isfile(CERTFILE) or not os.path.isfile(KEYFILE):
        # NOTE: This [gencert] will create certificates matching
        # the file names listed in CERTFILE and KEYFILE at the top
        gencert.gencert(rootDomain)
        print("Certfile(s) NOT present; new certs created.")
    else:
        print("Certfiles Verified Present")

# Start a thread to run the server connection as a daemon
def startServer(hostIP, serverPort):
    # Verify certificates are present prior to starting the server
    verifyCerts()

    # Now, start the thread
    t = threading.Thread(name="ServerDaemon",
                         target=listenNow,
                         args=(hostIP,
                               serverPort,
                               CERTFILE,
                               KEYFILE))
    t.daemon = True
    t.start()

if __name__ == '__main__':
    startServer("127.0.0.1", 12345)
    time.sleep(60)  # <-- To allow me to connect with a client before closing
When I run the above, with NO certificates present, this is the error I receive:
$ python3 test.py
Certfile(s) NOT present; new certs created.
Exception in thread ServerDaemon:
Traceback (most recent call last):
  File "/usr/lib/python3.5/threading.py", line 914, in _bootstrap_inner
    self.run()
  File "/usr/lib/python3.5/threading.py", line 862, in run
    self._target(*self._args, **self._kwargs)
  File "test.py", line 41, in listenNow
    server_side=True)
  File "/usr/lib/python3.5/ssl.py", line 1069, in wrap_socket
    ciphers=ciphers)
  File "/usr/lib/python3.5/ssl.py", line 691, in __init__
    self._context.load_cert_chain(certfile, keyfile)
FileNotFoundError: [Errno 2] No such file or directory
When I simply re-run the script a second time (i.e., the cert files are already present when it starts), everything runs as expected with NO errors, and I can connect my client just fine.
$ python3 test.py
Certfiles Verified Present
What is preventing the ssl.wrap_socket function from seeing/accessing the files that were just created (and thus producing the FileNotFoundError exception)?
EDIT 1:
Thanks for the comments, John Gordon. Here is a copy of gencert.py, courtesy of Atul Varma, found here: https://gist.github.com/toolness/3073310
import os
import sys
import hashlib
import subprocess
import datetime
OPENSSL_CONFIG_TEMPLATE = """
prompt = no
distinguished_name = req_distinguished_name
req_extensions = v3_req
[ req_distinguished_name ]
C = US
ST = IL
L = Chicago
O = Toolness
OU = Experimental Software Authority
CN = %(domain)s
emailAddress = varmaa@toolness.com
[ v3_req ]
# Extensions to add to a certificate request
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[ alt_names ]
DNS.1 = %(domain)s
DNS.2 = *.%(domain)s
"""
MYDIR = os.path.abspath(os.path.dirname(__file__))
OPENSSL = '/usr/bin/openssl'
KEY_SIZE = 1024
DAYS = 3650
CA_CERT = 'ca.cert'
CA_KEY = 'ca.key'
# Extra X509 args. Consider using e.g. ('-passin', 'pass:blah') if your
# CA password is 'blah'. For more information, see:
#
# http://www.openssl.org/docs/apps/openssl.html#PASS_PHRASE_ARGUMENTS
X509_EXTRA_ARGS = ()
def openssl(*args):
    cmdline = [OPENSSL] + list(args)
    subprocess.check_call(cmdline)

def gencert(domain, rootdir=MYDIR, keysize=KEY_SIZE, days=DAYS,
            ca_cert=CA_CERT, ca_key=CA_KEY):
    def dfile(ext):
        return os.path.join('domains', '%s.%s' % (domain, ext))

    os.chdir(rootdir)

    if not os.path.exists('domains'):
        os.mkdir('domains')

    if not os.path.exists(dfile('key')):
        openssl('genrsa', '-out', dfile('key'), str(keysize))
        # EDIT 3: mydomain.key gets output here during execution

    config = open(dfile('config'), 'w')
    config.write(OPENSSL_CONFIG_TEMPLATE % {'domain': domain})
    config.close()
    # EDIT 3: mydomain.config gets output here during execution

    openssl('req', '-new', '-key', dfile('key'), '-out', dfile('request'),
            '-config', dfile('config'))
    # EDIT 3: mydomain.request gets output here during execution

    openssl('x509', '-req', '-days', str(days), '-in', dfile('request'),
            '-CA', ca_cert, '-CAkey', ca_key,
            '-set_serial',
            '0x%s' % hashlib.md5(domain +
                                 str(datetime.datetime.now())).hexdigest(),
            '-out', dfile('cert'),
            '-extensions', 'v3_req', '-extfile', dfile('config'),
            *X509_EXTRA_ARGS)
    # EDIT 3: mydomain.cert gets output here during execution

    print "Done. The private key is at %s, the cert is at %s, and the " \
          "CA cert is at %s." % (dfile('key'), dfile('cert'), ca_cert)

if __name__ == "__main__":
    if len(sys.argv) < 2:
        print "usage: %s <domain-name>" % sys.argv[0]
        sys.exit(1)
    gencert(sys.argv[1])
EDIT 2:
Regarding John's comment, "this might mean that those files are being created, but not in the directory [I] expect":
When I have the directory open in another window, I see the files pop up in the correct location during execution. In addition, when running the test.py script a second time with no changes, the files are identified as present in the correct (the same) location. This leads me to believe that file location is not the problem. Thanks for the suggestion. I'll keep looking.
EDIT 3:
I stepped through the gencert.py program, and each of the files are correctly output at the right time during execution. I indicated when exactly they were output within the above file, labeled with "EDIT 3"
While gencert is paused awaiting my input (raw_input), I can open/view/edit the mentioned files in another program with no problem.
In addition, with the first test.py instance running (paused, waiting for user input, just after mydomain.cert appears), I can run a second instance of test.py in another terminal and it sees/uses the files just fine.
Within the first instance, however, if I continue the program it outputs "FileNotFoundError."
The problem stems from the use of os.chdir(rootdir), as John suggested; however, the specifics are slightly different from the created files simply being in the wrong location. The issue is that the current working directory (cwd) of the running program is changed by gencert(). Here are the specifics:
1. The program is started with test.py, which calls verifyCerts(). At this point the program is running in the directory from which test.py was launched (use os.getcwd() to confirm). In this example, it is running in:

   /home/name/testfolder/

2. Next, os.path.isfile() looks for the files "certs/mydomain.cert" and "certs/mydomain.key"; relative to the cwd above, it is therefore looking for:

   /home/name/testfolder/certs/mydomain.cert
   /home/name/testfolder/certs/mydomain.key

3. The above files are not present, so the program executes gencert.gencert(rootDomain), which correctly creates both files in the exact locations listed in number 2.

4. The problem is indeed the os.chdir() call: when gencert() executes, it uses os.chdir() to change the cwd to rootdir, which is os.path.abspath(os.path.dirname(__file__)), i.e., the directory of the current file (gencert.py). Since that file is located a folder deeper, the new cwd becomes:

   /home/name/testfolder/certs/

5. When gencert() finishes and control returns to test.py, the cwd is never changed back; it remains /home/name/testfolder/certs/ even after execution returns to test.py.

6. Later, when ssl.wrap_socket() tries to find serverCert and serverKey, it looks for "certs/mydomain.cert" and "certs/mydomain.key" relative to the cwd, so the full paths it is looking for are:

   /home/name/testfolder/certs/certs/mydomain.cert
   /home/name/testfolder/certs/certs/mydomain.key

7. These two files are NOT present, so the program correctly raises FileNotFoundError.
Solution
A) Move the "gencert.py" file to the same directory as "test.py"
B) At the beginning of "gencert.py" add cwd = os.getcwd() to record the original cwd of the program; then, at the very end, add os.chdir(cwd) to change back to the original cwd before ending and giving control back to the calling program.
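A minimal sketch of option B (assuming the rest of gencert() and the module-level constants MYDIR, KEY_SIZE, DAYS, CA_CERT and CA_KEY stay exactly as in the gist above; the try/finally is a small extra so the cwd is restored even if an openssl call fails):

import os

def gencert(domain, rootdir=MYDIR, keysize=KEY_SIZE, days=DAYS,
            ca_cert=CA_CERT, ca_key=CA_KEY):
    cwd = os.getcwd()        # record the original cwd of the calling program
    os.chdir(rootdir)
    try:
        pass                 # ... existing key/config/request/cert generation ...
    finally:
        os.chdir(cwd)        # change back before control returns to test.py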
I went with option 'B', and my program now works flawlessly. I appreciate the assistance from John Gordon to point me toward finding the source of my problem.
I have just installed the OCRopus OCR engine with all dependencies on my Windows 7 machine (I am using 32-bit Python 2.7). It seems to be working fine, except that I cannot load the default OCR model, en-default.pyrnn.gz, and I receive a traceback. I am using the following syntax:
python ocropus-rpred -m en-default.pyrnn.gz book\0001\*.png
Here is the error:
INFO: #inputs47
# loading object /usr/local/share/ocropus/en-default.pyrnn.gz
Traceback (most recent call last):
  File "ocropus-rpred", line 109, in <module>
    network = ocrolib.load_object(args.model,verbose=1)
  File "C:\anaconda32\lib\site-packages\ocrolib\common.py", line 513, in load_object
    return unpickler.load()
EOFError
I have checked that the file is not empty; I also double-checked that the binary mode flag is enabled (i.e., "wb" and "rb"), and converted the newlines of common.py using dos2unix. I am unable to solve this problem. If anyone has experienced similar issues, kindly share.
import cPickle
import gzip

def save_object(fname,obj,zip=0):
    if zip==0 and fname.endswith(".gz"):
        zip = 1
    if zip>0:
        # with gzip.GzipFile(fname,"wb") as stream:
        with os.popen("gzip -9 > '%s'"%fname,"wb") as stream:
            cPickle.dump(obj,stream,2)
    else:
        with open(fname,"wb") as stream:
            cPickle.dump(obj,stream,2)

def unpickle_find_global(mname,cname):
    if mname=="lstm.lstm":
        return getattr(lstm,cname)
    if not mname in sys.modules.keys():
        exec "import "+mname
    return getattr(sys.modules[mname],cname)

def load_object(fname,zip=0,nofind=0,verbose=0):
    """Loads an object from disk. By default, this handles zipped files
    and searches in the usual places for OCRopus. It also handles some
    class names that have changed."""
    if not nofind:
        fname = ocropus_find_file(fname)
    if verbose:
        print "# loading object",fname
    if zip==0 and fname.endswith(".gz"):
        zip = 1
    if zip>0:
        # with gzip.GzipFile(fname,"rb") as stream:
        with os.popen("gunzip < '%s'"%fname,"rb") as stream:
            unpickler = cPickle.Unpickler(stream)
            unpickler.find_global = unpickle_find_global
            return unpickler.load()
    else:
        with open(fname,"rb") as stream:
            unpickler = cPickle.Unpickler(stream)
            unpickler.find_global = unpickle_find_global
            return unpickler.load()
UPDATE: Please note that I have switched to Python's native gzip module, and it is working fine. Thank you for pointing that out. Here is the correct syntax that works on Windows: with gzip.GzipFile(fname, "rb") as stream:
Your use of gunzip (in the load_object function) is incorrect. Unless passed the -c argument, gunzip writes the decompressed data to a new file, not to its stdout (which is what you seem to be attempting to do).
As a result, it doesn't write anything to its stdout, and your stream variable contains no data, hence the EOFError.
A quick fix is to change your gunzip command line to give it the -c argument.
More info here: http://linux.die.net/man/1/gzip
That said, why are you even shelling out to gunzip to decompress your data? Python's built-in gzip module should handle that without problems.
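A minimal sketch of what the zipped branch of load_object could look like using the gzip module instead of shelling out (assuming unpickle_find_global from the question's code is importable; on Python 3 you would use pickle instead of cPickle):

import gzip
import cPickle

def load_object(fname):
    # Decompress and unpickle in-process; behaves the same on Linux and Windows.
    with gzip.GzipFile(fname, "rb") as stream:
        unpickler = cPickle.Unpickler(stream)
        unpickler.find_global = unpickle_find_global
        return unpickler.load()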
I have the following structure
C:\Users\dhiwakarr\workspace\BasicRegressionOnJoker\create&&bkp\script1.py
script1.py will call a function/method defined in script2.py which is located in
C:\Users\dhiwakarr\workspace\basics\script2.py
The problem is that script2.py makes use of an XML file (create.xml) which is located in the same folder as script2.py. But when I call this method IN script2.py FROM script1.py, I get the following error:
execute: Error 0x304: Failed to read the input file[createsc.xml].
Traceback (most recent call last):
File "create&&bkp.py", line 19, in <module>
CreateSC.create()
My guess is that the called script (script2.py) is searching for this file in the calling script's (script1.py) directory. How do I make the method of script2.py, when called from script1.py, search in its own directory?
UPDATE
script1.py
import subprocess,sys,getopt,codecs,re,string
import xml.etree.ElementTree as ET
sys.path.insert(0,r'C:\Users\dhiwakarr\workspace\basics')
import Login
import script2
#import script3

try:
    # First call the login script to login
    print('Login started')
    Login.login()
    print('Create Subclient')
    script2.create()
    ....
script2.py
import subprocess,sys,os,inspect
from sys import stdout
from _winapi import NULL

def create():
    '''
    A text file with information about the Client, Storage Policy, Backupset,
    Subclient & Content of each subclient must be given, as seen in sample-create.txt
    '''
    inputfile = r'C:\Users\dhiwakarr\workspace\create.txt'
    finp = open(inputfile,'r')
    path = str(os.getcwd())
    print('Current Working Path is -- '+path)
    for line in finp:
        line=line.rstrip('\n')
        ....
        # Creating the Subclient
        subprocess.check_call(["C:\\Program Files\\CommVault\\Simpana\\Base\\qoperation.exe", 'execute', '-af', 'createsc.xml', '-appName', "'File System'", '-clientName', client, '-backupsetName', bset, '-subclientName', scname, '-storagePolicyName', storagepolicy])
    else:
See the line with subprocess.check_call([...]); that is where it fails to read the XML.
The information given is sparse and more code would be helpful.
In general, when calling a script from the command line, the base path will be the directory the shell is currently in, not the directory of the script being called.
To get information about where your script is looking for files, insert
print os.getcwd()
at the appropriate places (before each open or file command). You need to have imported os from the standard library first, though.
Furthermore, a construct like
try:
    f = open(f)
    …
except IOError as e:
    print "I/O error({0}): {1}".format(e.errno, e.strerror)
except:
    print "Unexpected error:", sys.exc_info()[0]
    raise
might give a better understanding of the underlying problem.
Regarding your added code:
Changes to sys.path (.extend, .insert) will not change the directory used for file I/O; sys.path only directs the module loader. Use os.chdir or paths anchored to the script's own directory for file I/O (see the sketch below). Use the print os.getcwd() call from above to find out where your code is actually looking for the .xml file.
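For example, inside script2.py the path to the XML file could be built from the script's own location (a sketch; the remaining qoperation.exe arguments from the question are omitted here):

import os
import subprocess

# Resolve the XML file relative to script2.py itself, not the caller's cwd.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
xml_path = os.path.join(SCRIPT_DIR, 'createsc.xml')

subprocess.check_call(["C:\\Program Files\\CommVault\\Simpana\\Base\\qoperation.exe",
                       'execute', '-af', xml_path])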
I currently have a project that runs for several days. Since errors might occur in some steps of the execution, I pickle critical steps to be able to restart the script at the correct step (so I don't have to redo what may be more than 24 hours of work).
One thing I store with pickle is a list, steps. This list contains every step that has successfully finished; it is used to skip steps when I start the script again.
The problem is that pickle does not seem to update this list after I switch modules.
Code
mainscript.py
import pickle
import logging

import subscript

def set_status(mysql_script_instance_id, status, state=None):
    # [...]
    # update status in database (works as expected)
    # [...]
    if state is not None:
        with open("state.pickle", "wb") as f:
            pickle.dump(state, f)
        logging.debug("Dumped pickle. steps_done: %s" % state['steps_done'])
    logging.info(status)
subscript.py
import mainscript
[...]
logging.info("%s finished." % (step.__name__))
self.state['steps_done'].append(step.__name__)
[...]
logging.debug("self.state['steps_done'] = %s" % self.state['steps_done'])
mainscript.set_status(self.mysql_script_instance_id, "step xy done", self.state)
pickleviewer
#!/usr/bin/env python
import pickle
import pprint
state = pickle.load(open("state.pickle", "rb"))
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(state)
What I've tried
I get all log messages I've expected:
2014-04-03 08:23:07,727 INFO: step1 finished.
2014-04-03 08:23:07,728 DEBUG: self.state['steps_done'] = ['Fetch recordings', 'preparation', 'step1']
2014-04-03 08:23:07,927 DEBUG: Dumped pickle. steps_done: ['Fetch recordings', 'preparation', 'step1']
but when I look at the pickle file, I get:
{ [...]
'steps_done': ['Fetch recordings', 'preparation'],
[...]}
What could be the error? What can I do to find the error?
(If open did not work, I would get an exception, right?)
Use absolute file paths to open your pickle files; by using relative paths you are currently writing the pickle file to whatever the current working directory happens to be.
You can write to the same location as the script by basing the path on the __file__ global:
import os
here = os.path.dirname(os.path.abspath(__file__))
then use
with open(os.path.join(here, "state.pickle"), "wb") as f:
to create an absolute path to a pickle file in the same directory.
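Put together, the pickle-writing part of set_status could look roughly like this (a sketch under the assumption that the pickle file should live next to mainscript.py; the database update and logging from the question are omitted):

import os
import pickle

here = os.path.dirname(os.path.abspath(__file__))

def set_status(mysql_script_instance_id, status, state=None):
    if state is not None:
        # Always write next to mainscript.py, regardless of the caller's cwd.
        with open(os.path.join(here, "state.pickle"), "wb") as f:
            pickle.dump(state, f)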