I don't know why this test doesn't work. As far as I can tell I'm doing everything right according to the documentation.
Under unittest everything works correctly, but pytest is more advanced, so I want to switch.
import requests
import pytest


def get_historical_currency_rate(currency_code, currency_date):
    url = requests.get(
        f'http://api.nbp.pl/api/exchangerates/rates/a/{currency_code}/{currency_date}/?format=json')
    r = url.json()
    rate = r['rates'][0]['mid']
    return round(rate, 2)


@pytest.fixture
def currency_loop_helper():
    dates_rate = ['2018-05-25', '2017-02-20', '2013-12-11']
    currencies_codes = ['JPY', 'AUD', 'GBP']
    expected_rates = [0.03, 3.76, 4.44]
    actual_rates = []
    for i in range(len(dates_rate)):
        result = get_historical_currency_rate(currencies_codes[i], dates_rate[i])
        actual_rates.append(result)
    actual_list = [(a, b) for a, b in zip(actual_rates, expected_rates)]
    return actual_list


@pytest.mark.parametrize('expected, actual', currency_loop_helper)
def test_currency_rate_equal(expected, actual):
    assert expected == actual
Errors:
"...ParameterSet.extract_from(x, force_tuple=force_tuple) for x in argvalues
E TypeError: 'function' object is not iterable
=============================== warnings summary ===============================
/usr/lib/python3/dist-packages/urllib3/util/selectors.py:14
/usr/lib/python3/dist-packages/urllib3/util/selectors.py:14: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
from collections import namedtuple, Mapping
/usr/lib/python3/dist-packages/socks.py:58
/usr/lib/python3/dist-packages/socks.py:58: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
from collections import Callable
I believe you don't need to make currency_loop_helper a fixture. parametrize received the function object itself and tried to iterate over it, which is exactly what TypeError: 'function' object is not iterable is telling you. Call the function in the parametrize decorator over test_currency_rate_equal instead. The suggested code change would look like this:
import requests
import pytest


def get_historical_currency_rate(currency_code, currency_date):
    url = requests.get(
        f'http://api.nbp.pl/api/exchangerates/rates/a/{currency_code}/{currency_date}/?format=json')
    r = url.json()
    rate = r['rates'][0]['mid']
    return round(rate, 2)


def currency_loop_helper():
    dates_rate = ['2018-05-25', '2017-02-20', '2013-12-11']
    currencies_codes = ['JPY', 'AUD', 'GBP']
    expected_rates = [0.03, 3.76, 4.44]
    actual_rates = []
    for i in range(len(dates_rate)):
        result = get_historical_currency_rate(currencies_codes[i], dates_rate[i])
        actual_rates.append(result)
    # zip() yields (actual, expected) pairs, so the parameter names below
    # are declared in that order.
    actual_list = [(a, b) for a, b in zip(actual_rates, expected_rates)]
    return actual_list


@pytest.mark.parametrize('actual, expected', currency_loop_helper())
def test_currency_rate_equal(expected, actual):
    assert expected == actual
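One side effect worth noting: parametrize argvalues are built at collection time, so currency_loop_helper() issues all three HTTP requests before any test runs. A minimal alternative sketch (same endpoint and expected values as above) parametrizes over the inputs instead, keeping the network call inside the test body:

@pytest.mark.parametrize('currency_code, currency_date, expected', [
    ('JPY', '2018-05-25', 0.03),
    ('AUD', '2017-02-20', 3.76),
    ('GBP', '2013-12-11', 4.44),
])
def test_currency_rate_equal(currency_code, currency_date, expected):
    # The request now happens per test case, so a network failure shows up
    # as a failing test rather than an error during collection.
    assert get_historical_currency_rate(currency_code, currency_date) == expected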
I am trying to run DESeq2 through rpy2 for the first time and am having some difficulties.
import pandas as pd
import rpy2.robjects as robjects
from rpy2.robjects import Formula
from rpy2.robjects.packages import importr

deseq = importr('DESeq2')  # to_dataframe() is a helper defined elsewhere in my script


class py_DESeq2:
    def __init__(self, count_matrix):
        self.dds = None
        self.normalized_count_matrix = None
        self.vsd = None
        self.count_matrix = robjects.conversion.py2rpy(count_matrix)
        self.design_matrix = robjects.conversion.py2rpy(
            pd.DataFrame({'treatment': ['ctrl' for i in range(count_matrix.shape[1])]}))
        self.design_formula = Formula('~ 1')

    def norm_counts(self, **kwargs):
        self.dds = deseq.DESeqDataSetFromMatrix(countData=self.count_matrix,
                                                colData=self.design_matrix,
                                                design=self.design_formula)
        self.vsd = deseq.varianceStabilizingTransformation(self.dds, blind=True)
        self.normed_count_matrix = deseq.assay(self.vsd)
        self.normed_count_matrix = to_dataframe(self.normed_count_matrix)
        self.normed_count_matrix = robjects.conversion.rpy2py(self.normed_count_matrix)
I get the following error at self.normed_count_matrix = deseq.assay(self.vsd):
module 'DESeq2' has no attribute 'assay'
The code below runs fine in R:
library(DESeq2)
countData <- read.delim("0.333404867983521.R.data.in.txt")
colData <- read.delim("0.333404867983521.R.groups.in.txt")
dds <- DESeqDataSetFromMatrix(countData, colData,design=~Treatment,tidy=TRUE)
norm <- varianceStabilizingTransformation(dds,blind=TRUE)
norm_matrix <- assay(norm)
norm_df <- data.frame(Gene=rownames(norm_matrix), norm_matrix)
write.table(norm_df, "0.333404867983521.R.data.out.txt", row.names = FALSE,sep="\t")
The norm object is a <class 'rpy2.robjects.methods.RS4'>.
There must be something I am missing here and a point in the right direction would be appreciated!
If you open R and type:
library(DESeq2)
assay
you will see that the assay function does not actually come from DESeq2, but from one of its dependencies, SummarizedExperiment:
> assay
standardGeneric for "assay" defined from package "SummarizedExperiment"
function (x, i, withDimnames = TRUE, ...)
standardGeneric("assay")
<bytecode: 0x5586a354db90>
<environment: 0x5586a3535e20>
Methods may be defined for arguments: x, i
Use showMethods("assay") for currently available ones.
You can confirm that assay is not part of DESeq2 by using explicit namespaces in R:
> DESeq2::assay
Error: 'assay' is not an exported object from 'namespace:DESeq2'
and to confirm that it is indeed a part of SummarizedExperiment:
> SummarizedExperiment::assay
standardGeneric for "assay" defined from package "SummarizedExperiment"
Therefore in rpy2 you can use it like this:
from rpy2.robjects.packages import importr
summarized_experiment = importr('SummarizedExperiment')
summarized_experiment.assay(self.vsd)
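Applied to the class in the question, the failing line in norm_counts then becomes (assuming the importr call above is done once at module level):

        self.normed_count_matrix = summarized_experiment.assay(self.vsd)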
Given an object, produce the set of imports that are needed to execute the code that creates it.
Case 1:
some_obj = module.submodule.Class(42)
get_imports_for_object(some_obj)
>>> "import module.submodule"
Case 2 (sometimes the root module does not import submodules automatically, e.g. Airflow operators):
some_obj = submodule.Class(42)
get_imports_for_object(some_obj)
>>> "from module import submodule"
Case 3 (stretch goal):
some_obj = submodule.Class(sub2.Class2(42))
get_imports_for_object(some_obj)
>>> ["from module import submodule", "from module2 import sub2"]
The goal is to produce import lines such that prepending them to object instantiation code will make the instantiation work.
This'll do:
def get_object_imports(obj, sub_obj_class_name=None):
    sub_obj_modules = []
    if sub_obj_class_name is not None:
        for _, attribute_value in obj.__dict__.items():
            value_str = str(getattr(attribute_value, '__class__', ''))
            if ('.' + sub_obj_class_name) in value_str:
                sub_obj_modules.append(attribute_value.__module__)
    if sub_obj_modules != []:
        sub_module_imports = ['import ' + sub_obj_module
                              for sub_obj_module in sub_obj_modules]
        return ['import ' + obj.__module__] + sub_module_imports
    else:
        return 'import ' + obj.__module__
Cases (1) & (2) are equivalent, in that running either imports the same module. Note that with the above, objects whose class names match but that come from different source modules will all be included.
Demo:
from module import class1
from other_module import submodule

obj = class1(submodule.class2())
print(get_object_imports(obj, 'class2'))
# ['import module', 'import other_module.submodule']
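If you also need the Case 2 form (from module import submodule) for packages that don't import their submodules automatically, the plain import strings can be rewritten afterwards. as_from_import below is not part of the solution above, just a hypothetical sketch:

def as_from_import(module_path):
    # Hypothetical helper: render 'a.b.c' as 'from a.b import c'.
    parent, _, child = module_path.rpartition('.')
    return f'from {parent} import {child}' if parent else f'import {module_path}'

# as_from_import('other_module.submodule') -> 'from other_module import submodule'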
I'm trying to keep my code as clean as possible, but I'm not completely satisfied with what I've achieved so far.
I built an SNMP manager which receives traps from another device using a custom MIB, which I will refer to as MY-MIB.
I am not sure this is the cleanest way, but essentially I have:
from pysnmp.entity import engine, config
from pysnmp.carrier.asynsock.dgram import udp
from pysnmp.entity.rfc3413 import ntfrcv, context
from pysnmp.smi import builder, rfc1902
from pysnmp.smi.view import MibViewController
from pysnmp.entity.rfc3413 import mibvar

_snmp_engine = engine.SnmpEngine()
_snmp_context = context.SnmpContext(_snmp_engine)
_mib_builder = _snmp_context.getMibInstrum().getMibBuilder()

# Add the local path where MY-MIB is located
_mib_sources = _mib_builder.getMibSources() + (builder.DirMibSource('.'),)
_mib_builder.setMibSources(*_mib_sources)
_mib_builder.loadModules('MY-MIB')
_view_controller = MibViewController(_mib_builder)


def my_callback_trap_processor(snmp_engine, state_reference,
                               context_id, context_name, var_binds, ctx):
    # ...CALLBACK CODE...
    pass


config.addV1System(_snmp_engine, 'my-area', 'MYCOMMUNITY')
config.addTargetParams(_snmp_engine, 'my-creds', 'my-area',
                       'noAuthNoPriv', 1)
config.addSocketTransport(_snmp_engine,
                          udp.domainName + (1,),
                          udp.UdpTransport().openServerMode((IP_ADDRESS,
                                                             PORT)))
ntfrcv.NotificationReceiver(_snmp_engine, my_callback_trap_processor)

_snmp_engine.transportDispatcher.jobStarted(1)
try:
    _snmp_engine.transportDispatcher.runDispatcher()
except:
    _snmp_engine.transportDispatcher.closeDispatcher()
    raise
In the callback function above I can get a pretty intelligible print by just using the following code:
varBinds = [rfc1902.ObjectType(rfc1902.ObjectIdentity(x[0]), x[1]).resolveWithMib(_view_controller)
            for x in var_binds]
for varBind in varBinds:
    print(varBind.prettyPrint())
which, from a given trap that I receive, gives me:
SNMPv2-MIB::sysUpTime.0 = 0
SNMPv2-MIB::snmpTrapOID.0 = MY-MIB::myNotificationType
MY-MIB::myReplyKey.47746."ABC" = 0x00000000000000000000000000000000000
MY-MIB::myTime.0 = 20171115131544Z
MY-MIB::myOperationMode.0 = 'standalone'
Nice. But I want to manipulate/dissect each piece of information in the given var-binds, ideally at a higher level.
Looking at the innards of the library, I was able to piece together this code:
for varBind in var_binds:
    objct = rfc1902.ObjectIdentity(varBind[0]).resolveWithMib(_view_controller)
    (symName, modName), indices = mibvar.oidToMibName(
        _view_controller, objct.getOid()
    )
    print(symName, modName, indices, varBind[1])
that gives me:
sysUpTime SNMPv2-MIB (Integer(0),) 0
snmpTrapOID SNMPv2-MIB (Integer(0),) 1.3.6.1.X.Y.Z.A.B.C.D
myReplyKey MY-MIB (myTimeStamp(47746), myName(b'X00080')) 0x00000000000000000000000000000000000
myTime MY-MIB (Integer(0),) 20171115131544Z
myOperationMode MY-MIB (Integer(0),) 1
and in the case of the myReplyKey indices I can just do:
for idx in indices:
    try:
        print(idx.getValue())
    except AttributeError:
        print(int(idx))
But in the case of the myOperationMode var-bind, how do I get the named value 'standalone' instead of 1? And how do I get the names of the indices (myTimeStamp and myName)?
Update:
After Ilya's suggestions I researched the library a little more to get the namedValues, and also used some Python hacking to get what I was looking for on the indices.
resolved_var_binds = [rfc1902.ObjectType(rfc1902.ObjectIdentity(x[0]), x[1]).resolveWithMib(_view_controller)
                      for x in var_binds]
processed_var_binds = []
for var_bind in resolved_var_binds:
    object_identity, object_value = var_bind
    mod_name, var_name, indices = object_identity.getMibSymbol()
    var_bind_dict = {'mib': mod_name, 'name': var_name, 'indices': {}}
    for idx in indices:
        try:
            value = idx.getValue()
        except AttributeError:
            var_bind_dict['indices'] = int(idx.prettyPrint())
        else:
            var_bind_dict['indices'][type(value).__name__] = str(value)
    try:
        var_bind_dict['value'] = object_value.namedValues[object_value]
    except (AttributeError, KeyError):
        try:
            var_bind_dict['value'] = int(object_value.prettyPrint())
        except ValueError:
            var_bind_dict['value'] = object_value.prettyPrint()
    processed_var_binds.append(var_bind_dict)
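For the myOperationMode var-bind from the trap above, this loop should produce a dictionary roughly like the following (its index tuple is the single Integer(0), so 'indices' collapses to a plain int):

{'mib': 'MY-MIB', 'name': 'myOperationMode', 'indices': 0, 'value': 'standalone'}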
To resolve SNMP PDU var-bindings against a MIB you can use this snippet, which I think you have done already:
from pysnmp.smi.rfc1902 import *

var_binds = [ObjectType(ObjectIdentity(x[0]), x[1]).resolveWithMib(mibViewController)
             for x in var_binds]
By this point you have a list of rfc1902.ObjectType objects. The ObjectType instance mimics a two-element tuple: ObjectIdentity and SNMP value object.
var_bind = var_binds[0]
object_identity, object_value = var_bind
Now, getMibSymbol() will give you the MIB name, the MIB object name and the tuple of indices made up from the trailing part of the OID. Index elements are SNMP value objects, just like object_value:
>>> object_identity.getMibSymbol()
('SNMPv2-MIB', 'sysDescr', (0,))
The enumeration, should it be present, is reported by .prettyPrint():
>>> from pysnmp.proto.rfc1902 import *
>>> Error = Integer.withNamedValues(**{'disk-full': 1, 'no-disk': -1})
>>> error = Error(1)
>>> error.prettyPrint()
'disk-full'
>>> int(error)
1
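Applied to the myOperationMode var-bind from the question, once the value has been resolved with resolveWithMib(), the same two calls give both representations:

>>> object_value.prettyPrint()
'standalone'
>>> int(object_value)
1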
I am trying to patch the fun_1 function imported from the worker_functions module, and I seem to be struggling:
cli.py:
import sys

from worker_functions import (
    fun_1,
    fun_2,
    fun_3,
)

FUNCTION_MAP = {
    'run_1': fun_1,
    'run_2': fun_2,
    'run_3': fun_3,
}


def main():
    command = sys.argv[1]
    tag = sys.argv[2]
    action = FUNCTION_MAP[command]
    action(tag)
I've tried mocking cli.fun_1, cli.main.action, and cli.action, but each attempt fails.
test_cli.py:
from mock import patch

from cli import main


def make_test_args(tup):
    sample_args = ['cli.py']
    sample_args.extend(tup)
    return sample_args


def test_fun_1_command():
    test_args = make_test_args(['run_1', 'fake_tag'])
    with patch('sys.argv', test_args), \
            patch('cli.fun_1') as mock_action:
        main()
    mock_action.assert_called_once()
Am I missing something?
You'll need to patch the references in the FUNCTION_MAP dictionary itself. Use the patch.dict() callable to do so:
from unittest.mock import patch, MagicMock

mock_action = MagicMock()

with patch('sys.argv', test_args), \
        patch.dict('cli.FUNCTION_MAP', {'run_1': mock_action}):
    # ...
That's because the FUNCTION_MAP dictionary is where the function reference is looked up when main() runs; patching cli.fun_1 rebinds the module attribute but leaves the reference already stored in the dictionary untouched.
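Putting that together with the question's test, a version that patches the dictionary entry might look like this (also using assert_called_once_with to verify the tag argument):

from unittest.mock import patch, MagicMock

from cli import main


def test_fun_1_command():
    test_args = make_test_args(['run_1', 'fake_tag'])
    mock_action = MagicMock()
    with patch('sys.argv', test_args), \
            patch.dict('cli.FUNCTION_MAP', {'run_1': mock_action}):
        main()
    mock_action.assert_called_once_with('fake_tag')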
The following is code where py_cpp_bind refers to a piece of code written in C++11 and bound to Python using boost-python (with pickling enabled). Initializing the object requires three arguments (filename, int, int). I wanted to broadcast this object across the cluster, since it is needed to perform a computation for each element.
However, on execution Apache Spark seems to complain with
Caused by: java.io.EOFException
at java.io.DataInputStream.readInt(DataInputStream.java:392)
at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:166)
... 15 more
Code:
from pyspark.serializers import BatchedSerializer, PickleSerializer
from pyspark import SparkContext, SparkConf
import py_cpp_bind


def populate_NL(n, tk2):
    tk = [list(tk2[0]), tk2[1]]
    res = mscore.score(tk[1], tk[0])
    return res


def main(n, sc):
    mscore = py_cpp_bind.score()
    # the following line constructs the object from the given arguments
    print(mscore.init("data/earthquake.csv", n, 4000))
    broadcastVar = sc.broadcast(mscore)
    C = [((0,), [1])]
    C = sc.parallelize(C).flatMap(lambda X: populate_NL(n, X))
    print(C.collect())


if __name__ == "__main__":
    conf = SparkConf().setMaster("local[*]")
    conf = conf.setAppName("TEST")
    sc = SparkContext(conf=conf, serializer=PickleSerializer())
    n = 5
    main(n, sc)