I simulated a batch of 5 FMUs by putting a function containing the fmpy simulate call in a loop.
4 of them simulate correctly; the fifth one shows an error due to a wrong parameter.
My question is: is there any way to find out which FMU the error comes from during simulation?
Ideally the output should look like:
FMU1: OK
FMU2: OK
FMU3: 'Error'
and so on
# imports assumed by the snippet
import pandas as pd
from fmpy import simulate_fmu

def wf(i):
    result = simulate_fmu(
        fmupath[i],
        validate=False,
        start_time=0,
        stop_time=endtime,
        solver='CVode',
        output_interval=stepsize,
        record_events=False,
        start_values=parameters[i],
        output=resultvariables,
    )
    dfindres = pd.DataFrame.from_dict(result)
    return dfindres

results = [wf(i) for i in range(5)]
You can catch the exception and return the status message together with the result, like so:
def wf(i):
    try:
        result = simulate_fmu(
            fmupath[i],
            validate=False,
            start_time=0,
            stop_time=endtime,
            solver='CVode',
            output_interval=stepsize,
            record_events=False,
            start_values=parameters[i],
            output=resultvariables,
        )
    except Exception as e:
        return None, "FMU%d: Error" % i
    dfindres = pd.DataFrame.from_dict(result)
    return dfindres, "FMU%d: OK" % i

# list of tuples (result, status)
results = [wf(i) for i in range(5)]
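With that in place, printing the per-FMU report you described is just a loop over the returned tuples (a minimal sketch using the same results list as above):

# one status line per FMU, plus a list of the successful results
for dfindres, status in results:
    print(status)  # e.g. "FMU0: OK" or "FMU4: Error"

ok_results = [df for df, status in results if df is not None]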
def _get_trace(self) -> None:
    """Retrieves the stack trace via debug_traceTransaction and finds the
    return value, revert message and event logs in the trace.
    """
    # check if trace has already been retrieved, or the tx warrants it
    if self._raw_trace is not None:
        return
    self._raw_trace = []
    if self.input == "0x" and self.gas_used == 21000:
        self._modified_state = False
        self._trace = []
        return
    if not web3.supports_traces:
        raise RPCRequestError("Node client does not support `debug_traceTransaction`")
    try:
        trace = web3.provider.make_request(  # type: ignore
            "debug_traceTransaction", (self.txid, {"disableStorage": CONFIG.mode != "console"})
        )
    except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
        msg = f"Encountered a {type(e).__name__} while requesting "
        msg += "`debug_traceTransaction`. The local RPC client has likely crashed."
        if CONFIG.argv["coverage"]:
            msg += " If the error persists, add the `skip_coverage` marker to this test."
        raise RPCRequestError(msg) from None
    if "error" in trace:
        self._modified_state = None
        self._trace_exc = RPCRequestError(trace["error"]["message"])
        raise self._trace_exc
    self._raw_trace = trace = trace["result"]["structLogs"]
    if not trace:
        self._modified_state = False
        return
    # different nodes return slightly different formats. its really fun to handle
    # geth/nethermind returns unprefixed and with 0-padding for stack and memory
    # erigon returns 0x-prefixed and without padding (but their memory values are like geth)
    fix_stack = False
    for step in trace:
        if not step["stack"]:
            continue
        check = step["stack"][0]
        if not isinstance(check, str):
            break
        if check.startswith("0x"):
            fix_stack = True
> c:\users\xxxx\appdata\local\programs\python\python310\lib\site-packages\brownie\network\transaction.py(678)_get_trace()
-> step["pc"] = int(step["pc"], 16)
(Pdb)
I am doing Patrick's Solidity course and ran into this error. I ended up copying and pasting his code:
def test_only_owner_can_withdraw():
    if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:
        pytest.skip("only for local testing")
    fund_me = deploy_fund_me()
    bad_actor = accounts.add()
    with pytest.raises(exceptions.VirtualMachineError):
        fund_me.withdraw({"from": bad_actor})
Pytest worked for my other tests; however, when I tried to run this one it wouldn't work.
OK, so after looking at my scripts and contracts I found the issue. There was an issue with my .sol contract: instead of returning a variable, it was returning the error message from my retrieve function in the contract. It's fixed and working now.
I'm using the Python 3.7 SDK of Apache Beam 2.17.0 for Dataflow. The code runs locally, but I gather data from Pub/Sub. I try to combine per key, and everything goes fine until the pipeline calls the "merge_accumulators" function. From this point on, all the underlying code is executed twice.
After debugging and going deep into the source code, I found that the task is not properly finalized, and that is why it is executed twice.
This is the pipeline code:
options = {
    "runner": "DirectRunner",
    "streaming": True,
    "save_main_session": True
}

p = beam.Pipeline(options=PipelineOptions(flags=[], **options))

processRows = (p
    |'Read from topic' >> beam.io.ReadFromPubSub(subscription=get_subscription_address())
    |'Filter do not track' >> beam.ParDo(TakeOutNoTrack)
    |'Map Data' >> beam.ParDo(mapData)
    |'Filter metatags' >> beam.ParDo(filterMetatags)
    |'Label admin' >> beam.ParDo(labelAdmin)
    |'Process row' >> beam.ParDo(processRow)
)

sessionRow = (processRows
    |'Add timestamp' >> beam.Map(lambda x: window.TimestampedValue(x, x['timestamp']))
    |'Key on uuid' >> beam.Map(lambda x: (x['capture_uuid'], x))
    |'User session window' >> beam.WindowInto(window.Sessions(config_triggers['session_gap']),
        trigger=trigger.AfterWatermark(
            early=trigger.AfterCount(config_triggers['after_count'])),
        accumulation_mode=trigger.AccumulationMode.ACCUMULATING)
    |'CombineValues' >> beam.CombinePerKey(JoinSessions())
)

printing = (sessionRow
    |'Printing' >> beam.Map(lambda x: print(x))
)

print('running pipeline')
p.run().wait_until_finish()
print('done running the pipeline')
return
This is the config_triggers:
config_triggers = {
    "session_gap": 1320,
    "after_count": 1,
    "session_length": 20
}
This is the combine class:
class JoinSessions(beam.CombineFn):
    def define_format(self):
        try:
            data = {
                "session_uuid": [],
                "capture_uuid": "",
                "metatags": [],
                "timestamps": [],
                "admin": []
            }
            return data
        except Exception:
            logging.error("error at define data: \n%s" % traceback.format_exc())

    def create_accumulator(self):
        try:
            return self.define_format()
        except Exception:
            logging.error("error at create accumulator: \n%s " % traceback.format_exc())

    def add_input(self, metatags, input):
        try:
            metatags["session_uuid"].append(input.get('session_uuid'))
            metatags["capture_uuid"] = input.get('capture_uuid')
            metatags["metatags"].append(input.get('metatags'))
            metatags["timestamps"].append(input.get('timestamp'))
            metatags["admin"].append(input.get('admin'))
            print('test add_input')
            return metatags
        except Exception:
            logging.error("error at add input: \n%s" % traceback.format_exc())

    def merge_accumulators(self, accumulators):
        # print(accumulators)
        try:
            global test_counter
            tags_accumulated = self.define_format()
            for tags in accumulators:
                tags_accumulated["session_uuid"] += tags['session_uuid']
                tags_accumulated["capture_uuid"] += tags['capture_uuid']
                tags_accumulated["metatags"] += tags['metatags']
                tags_accumulated["timestamps"] += tags['timestamps']
                tags_accumulated["admin"] += tags['admin']
            test_counter += 1
            print('counter = ', test_counter)
            return tags_accumulated
        except Exception:
            logging.error("Error at merge Accumulators: \n%s" % traceback.format_exc())

    def extract_output(self, metatags):
        try:
            # print('New input in the pipeline:')
            # print('Extract_output: ')
            # print(metatags, '\n')
            return metatags
        except Exception:
            logging.error("error at return input: \n%s" % traceback.format_exc())
No errors or exceptions are thrown, nor any other kind of information. The output of the 'Printing' label is just printed twice. Also, the global counter goes up two times, even though there is just one data entry in the pipeline.
The print in the add_input function is executed just once.
I'm new to Dataflow, so sorry if I made a dumb mistake.
I think this is due to the trigger you have set.
trigger=trigger.AfterWatermark(early=trigger.AfterCount(config_triggers['after_count'])),
accumulation_mode=trigger.AccumulationMode.ACCUMULATING)
config_triggers['after_count'] is 1.
So you've set up a trigger that fires after every single element, and you also accumulate elements produced by trigger firings. So a second trigger firing within the same window will include the elements from the first firing, and so on. See the following for details on setting the trigger for your pipeline correctly according to your use case.
https://beam.apache.org/documentation/programming-guide/#triggers
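For illustration, here is a sketch of the windowing step with the early trigger removed and the accumulation mode switched to DISCARDING, so a window's elements are not emitted twice. This assumes the rest of the pipeline stays exactly as in the question; whether it fits depends on whether you actually need the early firings:

# Sketch: fire once per session window at the watermark and discard fired panes,
# instead of firing early after every element and accumulating.
sessionRow = (processRows
    |'Add timestamp' >> beam.Map(lambda x: window.TimestampedValue(x, x['timestamp']))
    |'Key on uuid' >> beam.Map(lambda x: (x['capture_uuid'], x))
    |'User session window' >> beam.WindowInto(
        window.Sessions(config_triggers['session_gap']),
        trigger=trigger.AfterWatermark(),
        accumulation_mode=trigger.AccumulationMode.DISCARDING)
    |'CombineValues' >> beam.CombinePerKey(JoinSessions())
)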
I have a function called analyze() which looks like the following:
def analyze():
    for stmt in irsb.statements:
        if isinstance(stmt, pyvex.IRStmt.WrTmp):
            wrtmp(stmt)
        if isinstance(stmt, pyvex.IRStmt.Store):
            address = stmt.addr
            address1 = '{}'.format(address)[1:]
            print address1
            data = stmt.data
            data1 = '{}'.format(data)[1:]
            tmp3 = store64(address1, int64(data1))
        if isinstance(stmt, pyvex.IRStmt.Put):
            expr = stmt.expressions[0]
            putoffset = stmt.offset
            data = stmt.data
            data4 = '{}'.format(data)[1:]
            if (str(data).startswith("0x")):
                #const_1 = ir.Constant(int64, data4)
                tmp = put64(putoffset, ZERO_TAG)
            else:
                put64(putoffset, int64(data4))
            if isinstance(stmt.data, pyvex.IRExpr.Const):
                reg_name = irsb.arch.translate_register_name(stmt.offset, stmt.data.result_size(stmt.data.tag))
                print reg_name
                stmt.pp()
This function gets the following input and tries to analyze it:
CODE = b"\xc1\xe0\x05"
irsb = pyvex.block.IRSB(CODE, 0x80482f0, archinfo.ArchAMD64())
When this input is in the same file as my code (let's call the whole thing analyze.py), it works, and python analyze.py produces an output. However, I want to make a separate file (call it array.py), call analyze there, also put the inputs inside it, and run python array.py to get the same result. I did the following for array.py:
from analyze import analyze
CODE = b"\xc1\xe0\x05"
irsb = pyvex.block.IRSB(CODE, 0x80482f0, archinfo.ArchAMD64())
analyze()
However, when I run array.py, it stops with the error:
NameError: name 'CODE' is not defined
How can I resolve this problem? What is the solution?
A simple change in your function: add a parameter:
def analyze(irsb):  # irsb here is a parameter
    ...
    # The rest is the same
And then pass arguments when calling it:
import pyvex
import archinfo
from analyze import analyze

CODE = b"\xc1\xe0\x05"
irsb_as_arg = pyvex.block.IRSB(CODE, 0x80482f0, archinfo.ArchAMD64())
analyze(irsb_as_arg)  # irsb_as_arg is an argument
I have just changed irsb to irsb_as_arg here to draw attention to it, but it can keep the same name.
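As a side note (an assumption about how analyze.py is laid out, not something shown in the question): if analyze.py keeps its own test input at module level, that code runs again whenever array.py does from analyze import analyze. Guarding it keeps the file usable both standalone and as a module:

# analyze.py -- sketch with a __main__ guard
import pyvex
import archinfo

def analyze(irsb):
    ...  # same body as above, using the irsb parameter

if __name__ == "__main__":
    # runs only for `python analyze.py`, not when array.py imports analyze
    CODE = b"\xc1\xe0\x05"
    irsb = pyvex.block.IRSB(CODE, 0x80482f0, archinfo.ArchAMD64())
    analyze(irsb)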
I was making a site component scanner with Python. Unfortunately, something went wrong when I added another value to my script. This is my script:
#!/usr/bin/python

import sys
import urllib2
import re
import time
import httplib
import random

# Color Console
W = '\033[0m'    # white (default)
R = '\033[31m'   # red
G = '\033[1;32m' # green bold
O = '\033[33m'   # orange
B = '\033[34m'   # blue
P = '\033[35m'   # purple
C = '\033[36m'   # cyan
GR = '\033[37m'  # gray

# Bad HTTP Responses
BAD_RESP = [400,401,404]

def main(path):
    print "[+] Testing:",host.split("/",1)[1]+path
    try:
        h = httplib.HTTP(host.split("/",1)[0])
        h.putrequest("HEAD", "/"+host.split("/",1)[1]+path)
        h.putheader("Host", host.split("/",1)[0])
        h.endheaders()
        resp, reason, headers = h.getreply()
        return resp, reason, headers.get("Server")
    except(), msg:
        print "Error Occurred:",msg
        pass

def timer():
    now = time.localtime(time.time())
    return time.asctime(now)

def slowprint(s):
    for c in s + '\n':
        sys.stdout.write(c)
        sys.stdout.flush() # defeat buffering
        time.sleep(8./90)

print G+"\n\t Whats My Site Component Scanner"

coms = { "index.php?option=com_artforms" : "com_artforms" + "link1","index.php?option=com_fabrik" : "com_fabrik" + "ink"}

if len(sys.argv) != 2:
    print "\nUsage: python jx.py <site>"
    print "Example: python jx.py www.site.com/\n"
    sys.exit(1)

host = sys.argv[1].replace("http://","").rsplit("/",1)[0]
if host[-1] != "/":
    host = host+"/"

print "\n[+] Site:",host
print "[+] Loaded:",len(coms)
print "\n[+] Scanning Components\n"

for com,nme,expl in coms.items():
    resp,reason,server = main(com)
    if resp not in BAD_RESP:
        print ""
        print G+"\t[+] Result:",resp, reason
        print G+"\t[+] Com:",nme
        print G+"\t[+] Link:",expl
        print W
    else:
        print ""
        print R+"\t[-] Result:",resp, reason
        print W

print "\n[-] Done\n"
And this is the error message that comes up:
Traceback (most recent call last):
  File "jscan.py", line 69, in <module>
    for com,nme,expl in xpls.items():
ValueError: need more than 2 values to unpack
I already tried changing the 2 values into 3 or 1, but it doesn't seem to work.
xpls.items() returns tuples of two items, but you're trying to unpack them into three. You initialize the dict yourself with two pairs of key:value:
coms = { "index.php?option=com_artforms" : "com_artforms" + "link1","index.php?option=com_fabrik" : "com_fabrik" + "ink"}
Besides, the traceback seems to be from another script - the dict is called xpls there, and coms in the code you posted...
You can try
for (xpl, poc) in xpls.items():
    ...
    ...
because dict.items() returns tuples with 2 values.
You have all the information you need. As with any bug, the best place to start is the traceback. Let's look at it:
for com,poc,expl in xpls.items():
ValueError: need more than 2 values to unpack
Python throws ValueError when a given object is of the correct type but has an incorrect value. In this case, this tells us that xpls.items() yields something iterable and thus unpackable, but the attempt failed.
The description of the exception narrows down the problem: each item has 2 values, but more were required. By looking at the quoted line, we can see that "more" means 3.
In short: each item of xpls was supposed to have 3 values, but has only 2.
Note that I never read the rest of the code. Debugging this was possible using only those 2 lines.
Learning to read tracebacks is vital. When you encounter an error such as this one again, devote at least 10 minutes to working with this information. You'll be repaid tenfold for your effort.
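If it helps to see the mechanism in isolation, this two-line snippet reproduces the same error (the message wording is Python 2's; Python 3 says "not enough values to unpack"):

pair = ("com_artforms", "link1")  # two values...
a, b, c = pair                    # ...three targets -> ValueError: need more than 2 values to unpack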
As already mentioned, dict.items() returns tuples with two values. If you use a list of strings as dictionary values, instead of a single string that would have to be split afterwards anyway, you can go with this syntax:
coms = { "index.php?option=com_artforms" : ["com_artforms", "link1"],
"index.php?option=com_fabrik" : ["com_fabrik", "ink"]}
for com, (name, expl) in coms.items():
print com, name, expl
>>> index.php?option=com_artforms com_artforms link1
>>> index.php?option=com_fabrik com_fabrik ink
I am a Python re-newbie. I would like advice on handling program parameters which are in a file in JSON format. Currently, I am doing something like what is shown below; however, it seems too wordy, and the idea of typing the same literal string multiple times (sometimes with dashes and sometimes with underscores) seems juvenile - error prone - stinky... :-) (I do have many more parameters!)
#!/usr/bin/env python

import sys
import os
import json  ## for control file parsing

# control parameters
mpi_nodes = 1
cluster_size = None
initial_cutoff = None
# ...

# process the arguments
if len(sys.argv) != 2:
    raise Exception(
        """Usage:
             run_foo <controls.json>
           Where:
             <control.json> is a dictionary of run parameters
        """
    )

# We expect a .json file with our parameters
controlsFileName = sys.argv[1]
err = ""
err += ""  # validateFileArgument(controlsFileName, exists=True)

# read in the control parameters from the .json file
try:
    controls = json.load(open(controlsFileName, "r"))
except:
    err += "Could not process the file '" + controlsFileName + "'!\n"

# check each control parameter. The first one is optional
if "mpi-nodes" in controls:
    mpi_nodes = controls["mpi-nodes"]
else:
    mpi_nodes = controls["mpi-nodes"] = 1

if "cluster-size" in controls:
    cluster_size = controls["cluster-size"]
else:
    err += "Missing control definition for \"cluster-size\".\n"

if "initial-cutoff" in controls:
    initial_cutoff = controls["initial-cutoff"]
else:
    err += "Missing control definition for \"initial-cutoff\".\n"

# ...

# Quit if any of these things were not true
if len(err) > 0:
    print err
    exit()

#...
This works, but it seems like there must be a better way. I am stuck with the requirements to use a json file and to use the hyphenated parameter names. Any ideas?
I was looking for something with more static binding. Perhaps this is as good as it gets.
Usually, we do things like this.
def get_parameters( some_file_name ):
    source = json.load( open( some_file_name ) )  # json.load reads the file; json.loads would expect the JSON text itself
    return dict(
        mpi_nodes= source.get('mpi-nodes',1),
        cluster_size= source['cluster-size'],
        initial_cutoff = source['initial-cutoff'],
    )
controlsFileName= sys.argv[1]
try:
    params = get_parameters( controlsFileName )
except IOError:
    print "Could not process the file '{0}'!".format( controlsFileName )
    sys.exit( 1 )
except KeyError, e:
    print "Missing control definition for '{0}'.".format( e.message )
    sys.exit( 2 )
At the end, params['mpi_nodes'] has the value taken from mpi-nodes in the file.
If you want a simple variable, you do this: mpi_nodes = params['mpi_nodes']
If you want a namedtuple, change get_parameters like this:
from collections import namedtuple

def get_parameters( some_file_name ):
    source = json.load( open( some_file_name ) )
    Parameters = namedtuple( 'Parameters', 'mpi_nodes, cluster_size, initial_cutoff' )
    return Parameters( source.get('mpi-nodes',1),
        source['cluster-size'],
        source['initial-cutoff'],
    )
I don't know if you'd find that better or not.
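For what it's worth, with the namedtuple version the call site could look something like this (same controlsFileName as above; just a sketch):

params = get_parameters( controlsFileName )
print params.cluster_size   # attribute access instead of params['cluster-size']
print params.mpi_nodes      # falls back to 1 if "mpi-nodes" was absent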
The argparse library is nice; it can handle most of the argument parsing and validation for you, as well as printing pretty help screens.
[1] http://docs.python.org/dev/library/argparse.html
I will knock up a quick demo showing how you'd want to use it this arvo.
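Until then, here is a rough sketch of what that could look like (the option names are just examples, and this parses command-line flags rather than the JSON file the question requires):

import argparse

parser = argparse.ArgumentParser(description="run_foo")
parser.add_argument("--mpi-nodes", type=int, default=1)           # optional, has a default
parser.add_argument("--cluster-size", type=int, required=True)    # mandatory
parser.add_argument("--initial-cutoff", type=float, required=True)
args = parser.parse_args()

# argparse turns the dashes into underscores for attribute access
mpi_nodes = args.mpi_nodes
cluster_size = args.cluster_size
initial_cutoff = args.initial_cutoff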
Assuming you have many more parameters to process, something like this could work:
def underscore(s):
    return s.replace('-','_')

# parameters with default values
for name, default in (("mpi-nodes", 1),):
    globals()[underscore(name)] = controls.get(name, default)

# mandatory parameters
for name in ("cluster-size", "initial-cutoff"):
    try:
        globals()[underscore(name)] = controls[name]
    except KeyError:
        err += "Missing control definition for %r" % name
Instead of manipulating globals, you can also make this more explicit:
def underscore(s):
    return s.replace('-','_')

settings = {}

# parameters with default values
for name, default in (("mpi-nodes", 1),):
    settings[underscore(name)] = controls.get(name, default)

# mandatory parameters
for name in ("cluster-size", "initial-cutoff"):
    try:
        settings[underscore(name)] = controls[name]
    except KeyError:
        err += "Missing control definition for %r" % name

# print out err if necessary

mpi_nodes = settings['mpi_nodes']
cluster_size = settings['cluster_size']
initial_cutoff = settings['initial_cutoff']
I learned something from all of these responses - thanks! I would like to get feedback on my approach, which incorporates something from each suggestion. In addition to the conditions imposed by the client, I want something:
1) that is fairly obvious to use and to debug
2) that is easy to maintain and modify
I decided to incorporate str.replace, namedtuple, and globals(), creating a ControlParameters namedtuple in the globals() namespace.
#!/usr/bin/env python

import sys
import os
import collections
import json

def get_parameters(parameters_file_name):
    """
    Access all of the control parameters from the json filename given. A
    variable of type namedtuple named "ControlParameters" is injected
    into the global namespace. Parameter validation is not performed. Both
    the names and the defaults, if any, are defined herein. Parameters not
    found in the json file will get values of None.

    Parameter usage example: ControlParameters.cluster_size
    """
    parameterValues = json.load(open(parameters_file_name, "r"))
    Parameters = collections.namedtuple( 'Parameters',
        """
        mpi_nodes
        cluster_size
        initial_cutoff
        truncation_length
        """
    )
    parameters = Parameters(
        parameterValues.get(Parameters._fields[0].replace('_', '-'), 1),
        parameterValues.get(Parameters._fields[1].replace('_', '-')),
        parameterValues.get(Parameters._fields[2].replace('_', '-')),
        parameterValues.get(Parameters._fields[3].replace('_', '-'))
    )
    globals()["ControlParameters"] = parameters

# process the program argument(s)
err = ""
if len(sys.argv) != 2:
    raise Exception(
        """Usage:
             foo <control.json>
           Where:
             <control.json> is a dictionary of run parameters
        """
    )

# We expect a .json file with our parameters
parameters_file_name = sys.argv[1]
err += ""  # validateFileArgument(parameters_file_name, exists=True)

if err == "":
    get_parameters(parameters_file_name)
    cp_dict = ControlParameters._asdict()
    for name in ControlParameters._fields:
        if cp_dict[name] == None:
            err += "Missing control parameter '%s'\r\n" % name

print err
print "Done"