Pytest: How to run variable number of tests defined in json file - python

I'm using the pytest framework to test an executable.
For this executable, I defined multiple test cases in a json file:
{
    "Tests": [
        {
            "name": "test1",
            "description": "writes hello world to file",
            "exe": "%path_to_exe%",
            "arguments": "--verbose",
            "expression": "test1.txt",
            "referencedir": "%path_to_referencedir%",
            "logdir": "%path_to_logdir%"
        },
        {
            "name": "test2",
            "description": "returns length of hello world string",
            "exe": "path_to_exe",
            "arguments": "--verbose",
            "expression": "test2.txt",
            "referencedir": "%path_to_referencedir%",
            "logdir": "%path_to_logdir%"
        }
    ]
}
For each of these test cases, the exe should start and execute the expression passed via the 'expression' attribute. Its output is written to the logdir (defined by the 'logdir' attribute), which should then be compared with the referencedir. Pytest should then indicate, for each test case, whether the output file in the logdir is identical to the file in the referencedir.
I'm struggling with making pytest go over each test case one by one.
I'm able to loop over each test, but assertions don't indicate which test exactly is failing.
def test_xxx():
    with open('tests.cfg') as f:
        data = json.loads(f.read())
    for test in data['Tests']:
        assert test['name'] == "test1"
Furthermore, I tried to parametrize the input, but I cannot get it to work either:
def load_test_cases():
    # Opening JSON file
    f = open('tests.cfg')
    # returns JSON object as a dictionary
    data = json.load(f)
    f.close()
    return data

@pytest.mark.parametrize("test", load_test_cases())
def test_xxx(test):
    assert test['name'] == "test1"
This returns 'test_json.py::test_xxx[Tests]: string indices must be integers', indicating that it's not actually looping over the test objects.

I would suggest that parametrizing is the better option here. parametrize expects an iterable; refer to the example and the comments in the code.
import json
import pytest

# method returns an iterator over the test cases
def get_the_test():
    with open('tests.cfg') as f:
        data = json.loads(f.read())
    return iter(data['Tests'])

# use the iterator object to feed the parameters
@pytest.mark.parametrize("test", get_the_test())
def test_xxx(test):
    assert test['name'] == "test1"

Storing the individual test cases in a list did the trick:
import json
import pytest

def load_test_cases():
    # Opening JSON file
    f = open('tests.cfg')
    testlist = []
    # returns JSON object as a dictionary
    data = json.load(f)
    for test in data['Tests']:
        testlist.append(test)
    f.close()
    return testlist

@pytest.mark.parametrize("test", load_test_cases())
def test_xxx(test):
    assert test['name'] == "test1"
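As a side note, if you also want each parametrized case to be reported under its own name (so a failure points at test1 or test2 rather than an auto-generated index), pytest's parametrize accepts an ids argument. This is a minimal sketch assuming the same tests.cfg layout as above:

import json
import pytest

def load_test_cases():
    with open('tests.cfg') as f:
        return json.load(f)['Tests']

# ids= derives a readable label from each test case, so a failing case shows
# up as e.g. test_xxx[test2] instead of test_xxx[test1] (an index-based id).
@pytest.mark.parametrize("test", load_test_cases(), ids=lambda t: t['name'])
def test_xxx(test):
    assert test['name'] == "test1"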


Error when defining a dictionary path as a variable: TypeError: string indices must be integers

I get this error "TypeError: string indices must be integers" when defining a variable.
def updateJson(fileName, pathToValue, updatedValue):
    # Opening JSON file
    f = open(fileName)
    # returns JSON object as a dictionary
    data = json.load(f)
    # Changes the ID value in JSON
    data[pathToValue] = updatedValue
    f.close()
    with open("template3.json", "w") as outfile:
        json.dump(data, outfile)

x = ['Something 1'][0]['ID']
updateJson("Temp\\random.json", x, 9)
JSON:
{
    "Something 1": [
        {
            "ID": "placeholder",
            "Music": "placeholder"
        }
    ]
}
But if I don't pass it as a variable and just use it directly in code, like data['Something 1'][0]['ID'] = updatedValue, it works as expected.
What I have tried:
Wrapping the variable in "", (), {} and some other minor things; in those cases it kind of works, but the path gets interpreted incorrectly and I can't successfully target the ID value in the JSON.
The problem has nothing to do with your JSON.
Consider the following code:
y = "Some string"["ID"]
This wouldn't work, right? Something like y = "Some string"[1] would set y equal to "o", but the example above is nonsensical.
When you are defining x, this is what's happening. Let's break it down:
x = ["Something 1"]
# x is a list, containing a single string
x = ["Something 1"][0]
# x is the first element of the list ["Something 1"], so x = "Something 1" - see for yourself!
x = ["Something 1"][0]["ID"]
# TypeError! This is equivalent to:
x = "Something 1"["ID"]
To get the functionality you're looking for, we need another way to pass this pathToValue. One way to do this is to pass the different parts as different parameters:
def updateJson(fileName, pathMain, pathIndex, pathMinor, updatedValue):
    ...
    data[pathMain][pathIndex][pathMinor] = updatedValue
    ...

updateJson("Temp\\random.json", "Something 1", 0, "ID", 9)  # Would work
However, this would only work if your JSON file has a very consistent structure.
A slightly more concise way to do this would be:
def updateJson(fileName, pathToValue, updatedValue):
    ...
    # Extract the different components of pathToValue from the list
    pathMain, pathIndex, pathMinor = pathToValue
    data[pathMain][pathIndex][pathMinor] = updatedValue
    ...

x = ["Something 1", 0, "ID"]
updateJson("Temp\\random.json", x, 9)  # Would work
The statement below does the following:
Creates a list with one item of type str and value "Something 1"
Takes the first element of the list ("Something 1")
Tries to get the element "ID" from "Something 1" and thus the error
x = ['Something 1'][0]['ID']
You will need to get these values from another object that holds the JSON data you expect.
Instead, try defining a function that applies the path to the right variable, like this:
def updateJson(fileName, update, updatedValue):
    # Opening JSON file
    f = open(fileName)
    # returns JSON object as a dictionary
    data = json.load(f)
    # Changes the ID value in JSON by calling the passed-in function
    update(data, updatedValue)
    f.close()
    with open("template3.json", "w") as outfile:
        json.dump(data, outfile)

# Overwrites the existing "ID" value in the first element of "Something 1"
x = lambda data, value: data["Something 1"][0].update({"ID": value})
updateJson("Temp\\random.json", x, 9)
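A more general variant of the same idea, shown here only as a sketch (set_by_path is a helper name introduced for illustration), is to pass the path as a list of keys/indices and walk it generically, so it works for paths of any depth:

import json
from functools import reduce

def set_by_path(data, path, value):
    # Walk every step of the path except the last, then assign at the final key/index.
    parent = reduce(lambda node, step: node[step], path[:-1], data)
    parent[path[-1]] = value

def updateJson(fileName, pathToValue, updatedValue):
    with open(fileName) as f:
        data = json.load(f)
    set_by_path(data, pathToValue, updatedValue)
    with open("template3.json", "w") as outfile:
        json.dump(data, outfile)

updateJson("Temp\\random.json", ["Something 1", 0, "ID"], 9)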

fastapi body behaving differently between two functions

I have two functions using similar parameters; for one they work as expected, and for the other they do not:
from fastapi import FastAPI, Header, HTTPException, Body

@app.post("/portfolio/analytics/carbon-footprint", dependencies=[Depends(api_counter)])
async def getPortfolioCarbonFootprint(
    tickers: list = Body(...),
    func: str = Body(...),
    http_client: aiohttp.ClientSession = fastapi.Depends(http_client)
):
    print(tickers)
    return res

# historical prices
@app.post("/portfolio/analytics/historicalprices", dependencies=[Depends(api_counter)])
async def getPortfolioHistoricalPrices(
    tickers: list = Body(...),
    http_client: aiohttp.ClientSession = fastapi.Depends(http_client)
):
    print(tickers)
    jsonResults = await getHistoricalPrices(tickers)
    return jsonResults
For both, I send this JSON:
{"tickers" : [ "IBM.US", "MSFT.US"]}
The first function works perfectly. The second returns this error:
{
    "detail": [
        {
            "loc": [
                "body"
            ],
            "msg": "value is not a valid list",
            "type": "type_error.list"
        }
    ]
}
Here's where it gets weird. If I send this:
[ "IBM.US", "MSFT.US"]
then it works as expected.
So function 1 works fine. Function 2 is copied from function 1, yet it doesn't accept tickers as keyed input, although sending it a raw list works.
The difference between the two functions is the number of parameters to be filled in by the user. In the first function you have tickers and func, and in the second one you only have tickers.
From the FastAPI documentation:
If you have only a single item body parameter from a Pydantic model Item, by default FastAPI will then expect its body directly. But if you want it to expect a JSON with a key item and, inside of it, the model contents, you can use the special Body parameter embed.
So in the second function, if you want to have a key, you must write:
tickers: list = Body(..., embed=True)
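A stripped-down, runnable sketch of the fixed second endpoint (the dependencies and the getHistoricalPrices call from the question are omitted here):

from fastapi import Body, FastAPI

app = FastAPI()

@app.post("/portfolio/analytics/historicalprices")
async def getPortfolioHistoricalPrices(
    # embed=True makes FastAPI expect {"tickers": [...]} instead of a bare list
    tickers: list = Body(..., embed=True),
):
    return {"tickers": tickers}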

Printing dictionary from inside a list puts one character on each line

Yes, yet another. I can't figure out what the issue is. I'm trying to iterate over a list that is a subsection of JSON output from an API call.
This is the section of JSON that I'm working with:
[
    {
        "created_at": "2017-02-22 17:20:29 UTC",
        "description": "",
        "id": 1,
        "label": "FOO",
        "name": "FOO",
        "title": "FOO",
        "updated_at": "2018-12-04 16:37:09 UTC"
    }
]
The code that I'm running to retrieve and display this:
#!/usr/bin/python

import json
import sys

try:
    import requests
except ImportError:
    print "Please install the python-requests module."
    sys.exit(-1)

SAT_API = 'https://satellite6.example.com/api/v2/'
USERNAME = "admin"
PASSWORD = "password"
SSL_VERIFY = False  # Ignore SSL for now

def get_json(url):
    # Performs a GET using the passed URL location
    r = requests.get(url, auth=(USERNAME, PASSWORD), verify=SSL_VERIFY)
    return r.json()

def get_results(url):
    jsn = get_json(url)
    if jsn.get('error'):
        print "Error: " + jsn['error']['message']
    else:
        if jsn.get('results'):
            return jsn['results']
        elif 'results' not in jsn:
            return jsn
        else:
            print "No results found"
            return None

def display_all_results(url):
    results = get_results(url)
    if results:
        return json.dumps(results, indent=4, sort_keys=True)

def main():
    orgs = display_all_results(KATELLO_API + "organizations/")
    for org in orgs:
        print org

if __name__ == "__main__":
    main()
I appear to be missing a concept, because when I print org I get one character per line, such as:
[
{
"
c
r
e
a
t
e
d
_
a
t
"
It does this through to the final ]
I've also tried to print org['name'], which throws the Python error TypeError: list indices must be integers, not str. This makes me think that org is being seen as a list rather than a dictionary, which I thought it would be due to the [{...}] format.
What concept am I missing?
EDIT: An explanation for why I'm not getting this: I'm working from a script in the Red Hat Satellite API Guide, which I'm using as the basis for another script. I'm basically learning as I go.
display_all_results is returning a string, because json.dumps(results, indent=4, sort_keys=True) converts the dictionary (which you get from r.json() in the get_json function) into a string.
You then end up iterating over the characters of that string in main, which is why you see one character per line.
Instead, just return results from display_all_results and the code will work as intended:
def display_all_results(url):
    # results is already a dictionary, just return it
    results = get_results(url)
    if results:
        return results
orgs is the result of json.dumps, which produces a string. So instead of this code:

for org in orgs:
    print(org)

replace it with simply:

print(orgs)
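To make the difference concrete, here is a small standalone sketch (the data is made up) showing why iterating over the json.dumps output prints one character per line, while iterating over the parsed list gives dictionaries:

import json

results = [{"id": 1, "name": "FOO"}]

as_text = json.dumps(results, indent=4)
for ch in as_text:
    print(ch)           # one character per line: '[', '\n', ' ', ...

for org in results:
    print(org["name"])  # FOO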

How to optimize the best in python for this program with good performance?

I have a big JSON file (a sample is shown below) for my application, with various variables whose values are strings and integers. I would like to read this file and store the values in different class variables for further processing; these class variables change based on the functionality. I would like to know of any ideas for optimizing the code below. Right now I am copying the data explicitly, without any list comprehensions or other technique. Is there a way to avoid assignments like config.ID = str(self.data["id"]) and config.ACTIVE = int(self.data["isActive"]) and do this more efficiently? (If I have 1000 variables, I need to write 1000 lines.)
read_con.py
-----------
import json

class config:
    ID = None
    ACTIVE = None
    AGE = None
    NAME = None
    GEN = None
    COM = None
    EMAIL = None

    def __init__(self):
        self.data = {}

    def read_config_data(self, cfile):
        try:
            with open(cfile, 'r') as cd:
                self.data = json.load(cd)
        except Exception:
            print("Error in Read file")
            self.data = {}
        else:
            # HOW TO AVOID COPY OF DATA AS BELOW.
            config.ID = str(self.data["id"])
            config.ACTIVE = int(self.data["isActive"])
            config.AGE = int(self.data["age"])
            config.NAME = str(self.data["name"])
            config.GEN = str(self.data["gender"])
            config.COM = str(self.data["company"])
            config.EMAIL = str(self.data["email"])

    def use_variables_modify_based_on_request(self):
        config.AGE = 45
        config.ACTIVE = 8
        config.EMAIL = "x@gmail.com"

    def printvalues(self):
        print config.ID, config.ACTIVE, config.AGE, config.NAME, config.EMAIL

if __name__ == "__main__":
    obj = config()
    obj.read_config_data("sample.json")
    obj.printvalues()
    # Modifying the values of class variables in different functions.
    obj.use_variables_modify_based_on_request()
    obj.printvalues()
sample.json file
-----------------
{
    "id": "59761c233d8d0",
    "isActive": 1,
    "age": 24,
    "name": "Kirsten Sellers",
    "gender": "female",
    "company": "EMERGENT",
    "email": "kirstensellers@emergent.com"
}
Instead of this:
...
else:
    # HOW TO AVOID COPY OF DATA AS BELOW.
    config.ID = str(self.data["id"])
    config.ACTIVE = int(self.data["isActive"])
    config.AGE = int(self.data["age"])
    config.NAME = str(self.data["name"])
    config.GEN = str(self.data["gender"])
    config.COM = str(self.data["company"])
    config.EMAIL = str(self.data["email"])
...
Do this:
...
else:
    for key, value in self.data.items():
        setattr(config, key.upper(), value)
...
(there is no need for the str and int calls since the values are already the appropriate type)
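If the attribute names don't line up exactly with the uppercased JSON keys (for example isActive vs ACTIVE, gender vs GEN), one variant of the same setattr idea is a small mapping table. This is only a sketch; FIELD_MAP and the load classmethod are names introduced here for illustration:

import json

# Hypothetical mapping from JSON keys to the existing class-variable names.
FIELD_MAP = {
    "id": "ID",
    "isActive": "ACTIVE",
    "age": "AGE",
    "name": "NAME",
    "gender": "GEN",
    "company": "COM",
    "email": "EMAIL",
}

class config:
    @classmethod
    def load(cls, cfile):
        with open(cfile) as cd:
            data = json.load(cd)
        # Copy only the keys we know about, under the attribute names we want.
        for key, attr in FIELD_MAP.items():
            if key in data:
                setattr(cls, attr, data[key])

config.load("sample.json")
print(config.ACTIVE, config.GEN, config.EMAIL)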

Python search replace with multiple Json objects

I wasn't sure how to search for this, but I am trying to make a script that dynamically launches programs. I will have a couple of JSON files, and I want to be able to do a search-and-replace sort of thing.
So I'll setup an example:
config.json
{
    "global_vars": {
        "BASEDIR": "/app",
        "CONFIG_DIR": "{BASEDIR}/config",
        "LOG_DIR": "{BASEDIR}/log",
        "CONFIG_ARCHIVE_DIR": "{CONFIG_DIR}/archive"
    }
}
Then process.json
{
    "name": "Dummy_Process",
    "binary": "java",
    "executable": "DummyProcess-0.1.0.jar",
    "launch_args": "-Dspring.config.location={CONFIG_DIR}/application.yml -Dlogging.config={CONFIG_DIR}/logback-spring.xml -jar {executable}",
    "startup_log": "{LOG_DIR}/startup_{name}.out"
}
Now I want to be able to load both of these JSON objects and use the values they define to expand the placeholders. So, for example, "CONFIG_ARCHIVE_DIR": "{CONFIG_DIR}/archive" will become "CONFIG_ARCHIVE_DIR": "/app/config/archive".
Does anyone know a good way to do this recursively? I'm running into issues with values like CONFIG_DIR, which requires BASEDIR to be resolved first.
I have this function that loads all the data:
# Recursive function, loops and loads all values into data
def _load_data(data, obj):
    for i in obj.keys():
        if isinstance(obj[i], str):
            data[i] = obj[i]
        if isinstance(obj[i], dict):
            data = _load_data(data, obj[i])
    return data
Then I have this function:
def _update_data(data, data_str=""):
    if not data_str:
        data_str = json.dumps(data)
    for i in data.keys():
        if isinstance(data[i], str):
            data_str = data_str.replace("{" + i + "}", data[i])
        if isinstance(data[i], dict):
            data = _update_data(data, data_str)
    return json.loads(data_str)
So this works for one level, but I don't know if this is the best way to do it. It stops working when I hit a case like CONFIG_DIR, because the data would need to be looped over multiple times: first to expand BASEDIR, then once more to expand CONFIG_DIR. Suggestions welcome.
The end goal of this script is to create a start/stop/status script to manage all of our binaries. They all use different binaries to start, and I want one Processes file for multiple servers. Each process will have a servers array telling the start/stop script what to run on a given server. Maybe there's something like this already out there; if there is, please point me in that direction.
I will be running on Linux and prefer to use Python. I want something smart that is easy for someone else to pick up and use/modify.
I made something that works with the example files you provided. Note that I didn't handle multiple keys or non-dictionaries in the data. This function accepts a list of the dictionaries obtained after JSON parsing your input files. It uses the fact that re.sub can accept a function for the replacement value and calls that function with each match. I am sure there are plenty of improvements that could be made to this, but it should get you started at least.
import re

def make_config(configs):
    replacements = {}

    def find_defs(config):
        # Find leaf nodes of the dictionary.
        defs = {}
        for k, v in config.items():
            if isinstance(v, dict):
                # Nested dictionary so recurse.
                defs.update(find_defs(v))
            else:
                defs[k] = v
        return defs

    for config in configs:
        replacements.update(find_defs(config))

    def make_replacement(m):
        # Construct the replacement string.
        name = m.group(0).strip('{}')
        if name in replacements:
            # Replace replacement strings in the replacement string.
            new = re.sub(r'\{[^}]+\}', make_replacement, replacements[name])
            # Cache result
            replacements[name] = new
            return new
        raise Exception('Replacement string for {} not found'.format(name))

    finalconfig = {}
    for name, value in replacements.items():
        finalconfig[name] = re.sub(r'\{[^}]+\}', make_replacement, value)
    return finalconfig
With this input:
[
    {
        "global_vars": {
            "BASEDIR": "/app",
            "CONFIG_DIR": "{BASEDIR}/config",
            "LOG_DIR": "{BASEDIR}/log",
            "CONFIG_ARCHIVE_DIR": "{CONFIG_DIR}/archive"
        }
    },
    {
        "name": "Dummy_Process",
        "binary": "java",
        "executable": "DummyProcess-0.1.0.jar",
        "launch_args": "-Dspring.config.location={CONFIG_DIR}/application.yml -Dlogging.config={CONFIG_DIR}/logback-spring.xml -jar {executable}",
        "startup_log": "{LOG_DIR}/startup_{name}.out"
    }
]
It gives this output:
{
    'BASEDIR': '/app',
    'CONFIG_ARCHIVE_DIR': '/app/config/archive',
    'CONFIG_DIR': '/app/config',
    'LOG_DIR': '/app/log',
    'binary': 'java',
    'executable': 'DummyProcess-0.1.0.jar',
    'launch_args': '-Dspring.config.location=/app/config/application.yml -Dlogging.config=/app/config/logback-spring.xml -jar DummyProcess-0.1.0.jar',
    'name': 'Dummy_Process',
    'startup_log': '/app/log/startup_Dummy_Process.out'
}
As an alternative to the answer by @FamousJameous: if you don't mind changing to INI format, you can also use the Python built-in configparser, which already has support for expanding variables.
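A minimal sketch of that approach (the section and option names below are made up to mirror the JSON example; ExtendedInterpolation is the stock configparser feature that expands ${...} references, including chained ones):

from configparser import ConfigParser, ExtendedInterpolation

ini_text = """
[global_vars]
BASEDIR = /app
CONFIG_DIR = ${BASEDIR}/config
LOG_DIR = ${BASEDIR}/log
CONFIG_ARCHIVE_DIR = ${CONFIG_DIR}/archive
"""

parser = ConfigParser(interpolation=ExtendedInterpolation())
parser.read_string(ini_text)

# ${...} references are expanded on access, including chained ones.
print(parser["global_vars"]["CONFIG_ARCHIVE_DIR"])  # /app/config/archive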
I implemented a solution with a class (Config) that has a few functions:
_load: simply converts from JSON to a Python object;
_extract_params: loops over the document (the output of _load) and adds the parameters to a class attribute (self.params);
_loop: loops over the object returned by _extract_params and, if a value contains any {param}, calls the _transform method;
_transform: replaces the {param} placeholders in the values with the correct values; if the value of the param being substituted itself contains a '{', the function calls itself again
I hope I was clear enough, here is the code:
import json
import re

config = """{
    "global_vars": {
        "BASEDIR": "/app",
        "CONFIG_DIR": "{BASEDIR}/config",
        "LOG_DIR": "{BASEDIR}/log",
        "CONFIG_ARCHIVE_DIR": "{CONFIG_DIR}/archive"
    }
}"""

process = """{
    "name": "Dummy_Process",
    "binary": "java",
    "executable": "DummyProcess-0.1.0.jar",
    "launch_args": "-Dspring.config.location={CONFIG_DIR}/application.yml -Dlogging.config={CONFIG_DIR}/logback-spring.xml -jar {executable}",
    "startup_log": "{LOG_DIR}/startup_{name}.out"
}"""

class Config(object):
    def __init__(self, documents):
        self.documents = documents
        self.params = {}
        self.output = {}

    # Loads JSON to dictionary
    def _load(self, document):
        obj = json.loads(document)
        return obj

    # Extracts the config parameters into a dictionary
    def _extract_params(self, document):
        for k, v in document.items():
            if isinstance(v, dict):
                # Recursion for inner dictionaries
                self._extract_params(v)
            else:
                # if not a dict, set params[k] as v
                self.params[k] = v
        return self.params

    # Loop over the configs dictionary
    def _loop(self, params):
        for key, value in params.items():
            # if there is any parameter inside the value
            if len(re.findall(r'{([^}]*)\}', value)) > 0:
                findings = re.findall(r'{([^}]*)\}', value)
                # call the transform function
                self._transform(params, key, findings)
        return self.output

    # Replace all the findings with the correct value
    def _transform(self, object, key, findings):
        # Iterate over the found params
        for finding in findings:
            # if { -> recursion to set all the needed values right
            if '{' in object[finding]:
                self._transform(object, finding, re.findall(r'{([^}]*)\}', object[finding]))
            # Do the actual replace
            object[key] = object[key].replace('{' + finding + '}', object[finding])
        self.output = object
        return self.output

    # Entry point
    def process_document(self):
        params = {}
        # _load the documents and extract the params
        for document in self.documents:
            params.update(self._extract_params(self._load(document)))
        # _loop over the params
        return self._loop(params)

if __name__ == '__main__':
    config = Config([config, process])
    print(config.process_document())
I am sure there are many other, better ways to reach your goal, but I still hope this can be useful to you.
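For completeness, here is one more lightweight take on the same problem, sketched under the assumption that placeholders only ever reference keys that actually exist and that there are no circular references (a stray literal brace would make str.format raise a KeyError): flatten both files into one key/value map and keep substituting until nothing changes, which handles chained references like CONFIG_DIR depending on BASEDIR without explicit recursion.

import json

def flatten(obj, out=None):
    # Collect all leaf values from nested dictionaries into one flat map.
    if out is None:
        out = {}
    for key, value in obj.items():
        if isinstance(value, dict):
            flatten(value, out)
        else:
            out[key] = value
    return out

def resolve(values):
    # Repeatedly substitute {NAME} placeholders until a full pass changes nothing.
    resolved = dict(values)
    changed = True
    while changed:
        changed = False
        for key, value in resolved.items():
            new_value = value.format(**resolved) if isinstance(value, str) else value
            if new_value != value:
                resolved[key] = new_value
                changed = True
    return resolved

with open('config.json') as f1, open('process.json') as f2:
    merged = flatten(json.load(f1))
    merged.update(flatten(json.load(f2)))

resolved = resolve(merged)
print(resolved['CONFIG_ARCHIVE_DIR'])  # /app/config/archive
print(resolved['startup_log'])         # /app/log/startup_Dummy_Process.out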
