I have a problem that I do not know how to solve. I want to be able to print objects by their category; for example, I want to see only footwear objects. What method should I create? I hope you understand what I am asking; if not, feel free to ask.
Here is my code:
class Item:
name = ""
price = ""
size = ""
additionalInfo = ""
forWhichSex= ""
itemType= ""
def __init__(self, name, price, size, additionalInfo, forWhichSex, itemType):
self.name = name
self.price = price
self.size = size
self.additionalInfo = additionalInfo
self.forWhichSex = forWhichSex
self.itemType = itemType
@staticmethod
def createItemWithCategory(name,price,size,additionalInfo,forWhichSex,itemType):
if FootWear.isFootWear(itemType):
return FootWear(name,price,size,additionalInfo,forWhichSex,itemType)
elif Appearel.isAppearel(itemType):
return Appearel(name,price,size,additionalInfo,forWhichSex,itemType)
elif Accessory.isAccessory(itemType):
return Accessory(name,price,size,additionalInfo,forWhichSex,itemType)
else:
return None
class FootWear(Item):
def __init__(self,name,price,size,additionalInfo,forWhichSex,itemType):
super().__init__(name,price,size,additionalInfo,forWhichSex,itemType)
@staticmethod
def isFootWear(itemType):
defaultCategory = ["lifestyle shoes", "basketball shoes", "training shoes","running shoes","tennis shoes","soccer shoes","slides shoes"]
for dcategory in defaultCategory:
if dcategory in itemType.lower():
return True
return False
class Appearel(Item):
def __init__(self,name,price,size,additionalInfo,forWhichSex,itemType):
super().__init__(name,price,size,additionalInfo,forWhichSex,itemType)
@staticmethod
def isAppearel(itemType):
defaultCategory = ["shirts", "jackets", "hoodies","pants", "shorts", "bathrobes", "bra", "skirts/dresses", "vests"]
for dcategory in defaultCategory:
if dcategory in itemType.lower():
return True
return False
class Accessory(Item):
def __init__(self,name,price,size,additionalInfo,forWhichSex,itemType):
super().__init__(name,price,size,additionalInfo,forWhichSex,itemType)
@staticmethod
def isAccessory(itemType):
defaultCategory = ["caps", "wristbands", "backpacks", "socks", "balls", "shoelaces", "watches", "scarves", "gloves", "towels", "flasks", "braces"]
for dcategory in defaultCategory:
if dcategory in itemType.lower():
return True
return False
Reading from the JSON file and printing looks like this:
@staticmethod
def jsonOpener(jsonFileName):
myItemList = []
with open(jsonFileName, "r") as filelines:
for fileline in filelines:
jsonObj = json.loads(fileline)
print(str(jsonObj))
myItem = Item.createItemWithCategory(
jsonObj['name'],
jsonObj['price'],
jsonObj['size'],
jsonObj['additionalInfo'],
jsonObj['forWhichSex'],
jsonObj['itemType']
)
print(type(myItem))
myItemList.append(myItem)
return myItemList
You could use a list comprehension together with your FootWear.isFootWear() static method: build a list from the itemList list and keep only the items for which the function returns True.
listOfFootwearItems = [item for item in itemList if FootWear.isFootWear(item["itemType"])]
EDIT:
For instance, if you want, you could have a function like this:
def filter_by_category(itemList, categoryFunction):
return [item for item in itemList if categoryFunction(item["itemType"])]
And you can call it like this:
a = {"itemType" : "running shoes", "key": 1}
b = {"itemType" : "shirts", "key": 2}
c = {"itemType" : "wristbands", "key": 3}
d = {"itemType" : "running shoes", "key": 4}
l = [a, b, c, d]
footWears = filter_by_category(l, FootWear.isFootWear)
apparels = filter_by_category(l, Appearel.isAppearel)
accessories = filter_by_category(l, Accessory.isAccessory)
Note that you are passing the static methods as arguments; functions in Python are first-class citizens, so they can be passed around like any other object. If you print the resulting lists:
print(footWears)
print(apparels)
print(accessories)
Outputs:
[{'itemType': 'running shoes', 'key': 1}, {'itemType': 'running shoes', 'key': 4}]
[{'itemType': 'shirts', 'key': 2}]
[{'itemType': 'wristbands', 'key': 3}]
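If you are working with the Item objects returned by jsonOpener rather than raw dicts, a similar filter can key off each object's class instead. This is just a sketch, assuming myItemList holds the FootWear/Appearel/Accessory instances produced by createItemWithCategory:

def filter_by_class(items, cls):
    # Keep only the items that are instances of the given subclass.
    return [item for item in items if isinstance(item, cls)]

# e.g. print every footwear item:
# for shoe in filter_by_class(myItemList, FootWear):
#     print(shoe.name, shoe.price)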
Can someone help me with my code below? The main purpose of this is to use the data_manager class as a way to store data in a JSON file.
When a data_manager is created, it creates a JSON file with the specified name; that file contains a JSON base (a top-level key) that is also given the specified name.
The major function that handles the majority of the logic is the write_to_json function inside the data_manager class:
def write_to_json(self, new_data, base = ""):
There are four major use cases that I am trying to handle:
where 'match' means the item key is already found in the file data, and 'base' is the existing tier/base we are trying to add the data to (if blank, add it to the main base):
Case 1 - No match, no base: add to main list
Case 2 - No match, base: add to base
Case 3 - Match, no base: check if the value is different and, if so, replace the value in the main list
Case 4 - Match, base: check if the value is different and, if so, replace the value in the base list
Right now I have cases 1 and 3 working, but I am having issues with 2 and 4.
I have tried many different ways to code this and keep running into problems.
import os
import sys
import json
class file_manager:
def set_file_contents(self, file_name, contents):
file = open(file_name, "w")
file.write(contents)
file.close()
def set_file_contents_append(self, file_name, contents):
if not os.path.exists(file_name):
open(file_name, 'a').close()
file = open(file_name, "a")
file.write(contents)
file.close()
def get_file_size(self,file_name):
return os.path.getsize(file_name)
def get_file_content_json(self,file_name):
return json.load(open(file_name)) if self.get_file_size(file_name) != 0 else None
class data_manager(file_manager):
data_file = None
json_data = []
data_name = None
def __init__(self,data_name):
self.data_base = data_name
self.data_file = data_name + '.json'
self.create_data_file()
def create_base(self,base):
data = {base:[]}
self.set_file_contents(self.data_file,json.dumps(data,indent=4))
def create_data_file(self):
self.create_base(self.data_base)
def check_file_size(self):
print(self.get_file_size(self.data_file))
def check_if_exist(self, data_name):
file_data = self.get_file_content_json(self.data_file)
data_value = False
for item in file_data[self.data_base]:
if data_name in item.keys():
print(f'{data_name}: {item[data_name]}')
data_value = True
return data_value
def get_data_value(self,data_name):
d = self.get_file_content_json(self.data_file)
d = d[self.data_base]
print(data_name)
items = []
for item in self.item_generator(d,data_name):
print(f'returning value = {item}')
items.append(item)
return dict(items[0]) if items else None
def item_generator(self,json_input, lookup_key):
if isinstance(json_input, dict):
for key, value in json_input.items():
if key == lookup_key:
yield {key:value}
else:
yield from self.item_generator(value, lookup_key)
elif isinstance(json_input, list):
for item in json_input:
yield from self.item_generator(item, lookup_key)
def replace_data_value_json(self, file_data, data_name, data_value):
for item in file_data:
if data_name in item.keys():
item[data_name] = data_value
return file_data
def set_data_value(self, data_name, data_value):
file_data = self.get_file_content_json(self.data_file)
for item in file_data[self.data_base]:
if data_name in item.keys():
item[data_name] = data_value
self.set_file_contents(self.data_file,json.dumps(file_data,indent=4))
def view_all_data(self):
file_data = self.get_file_content_json(self.data_file)
print((file_data))
def remove_data_item(self, data_name):
file_data = self.get_file_content_json(self.data_file)
print(file_data)
for element in file_data[self.data_base]:
if data_name in element:
del element[data_name]
self.set_file_contents(self.data_file,json.dumps(file_data,indent=4).replace('{}','').replace('\{\},','') )
def prettyjson(self,data):
return json.dumps(data,indent=4)
def compare_equal(self, value1, value2):
print(f'{value1} really vs {value2}')
return True if str(value1) == str(value2) else False
def write_to_json(self, new_data, base = ""):
file_data = self.get_file_content_json(self.data_file)
#print(f'Data before:\n{json.dumps(file_data,indent=4)}')
base = self.data_base if base == "" else base
#print(list(file_data) - file_data[base])
print(f'Complete file:\n{self.prettyjson(file_data)}')
#file_starting_from_base = list(file_data[data_name])
print('Starting from base')
print(self.prettyjson(file_data[self.data_base]))
file_data_before = ([item for item in file_data if item not in file_data[self.data_base]])
#print(file_data_before)
for data_item in new_data:
match_found = False
index = None
value_from_name = self.get_data_value(data_item)
value_from_name = list(value_from_name) if value_from_name else None
# print(f'before: {value_from_name} {type(value_from_name)} {list(value_from_name)} ')
value_from_name = value_from_name[0] if value_from_name and value_from_name[0] else None
value_from_name_found = True if value_from_name else False
if value_from_name_found:
print(f'found: {value_from_name}')
match_found = True
#OLD METHOD USED TO FIND INDEX
# #if base != self.data_base:
# index = 0
# for item in file_data[self.data_base]:
# print(f'{list(item.keys())[0]} vs {base}')
# if str(base) in list(item.keys())[0]:
# print(f'MATCH FOUND = {base} = {list(item.keys())[0]}')#: {item.values()} {index}')
# match_found = True
# break
# index += 1
# print(index)
# #return
data_single_item = {data_item:new_data[data_item]}
if not match_found:
#Case 1 - No match, no base: add to main list
if base == self.data_base:
file_data[self.data_base].append(data_single_item)
else:
#Case 2 - No match, base: add to base
print(f'ADD {data_single_item} TO {file_data[self.data_base]} starting from {base}')
#possible idea: create base and try again adding values again
#self.create_base(base)
#self.write_to_json(data_single_item,base)
#file_data[self.data_base].append(data_single_item) #broken
#old working method, broken without index
#file_data[self.data_base][index][base].append(data_single_item)
#MATCH FOUND
else:
#Case 3 - Match, no base: check if value is different and if so, replace value in main list
if base == self.data_base:
file_data[base] = self.replace_data_value_json(file_data[base],str(data_item),new_data[data_item])
pass
else:
#Case 4 - Match, base, check if value is different and if so, replace value in base list
print(f'data = {self.get_data_value(data_item)}' )
# print(f'check {new_data[data_item]} vs {list(self.get_data_value(data_item))[0]}')
value_from_name = self.get_data_value(data_item)
value_from_name = list(value_from_name) if value_from_name else None
print(f'before: {value_from_name} {type(value_from_name)} {list(value_from_name)} ')
value_from_name = value_from_name[0] if value_from_name and value_from_name[0] else None
value_from_name_found = True if value_from_name else False
if value_from_name_found and (not self.compare_equal( new_data[data_item], value_from_name ) ):
print(f'{new_data[data_item]} NOT EQUAL TO {value_from_name}')
# change value to new value
#file_data[self.data_base][base].append(data_single_item)
#print(f'add {data_single_item}')
final_output = {self.data_base:file_data[self.data_base]}
self.set_file_contents(self.data_file,self.prettyjson(final_output))
def add_data_single(self, data_name, data_value, base):
new_data_item = {data_name: data_value}
self.write_to_json(new_data_item, base)
def add_data_multiple(self,data,base=""):
self.write_to_json(data,base)
# CREATE 'people.json' AND create json base matching name in file ( { "people": [] } )
test = data_manager('people')
# CREATE 3 ITEMS STARTING IN MAIN BASE
test.write_to_json({'John':[], 'Alex':[], 'Samantha':[]}) # CASE 1
# SHOULD ATTEMPT TO ADD VALUES TO BASE 'john',
# if dictionary key matches, check if key matches
# if key and value match, do nothing and do not overwrite file)
# if key matches and value does not, change the value of the item matching the key starting from base 'John'
# if dictionary key does not match, add full dictionary item to base
test.write_to_json({"Favorite-Food":"tacos" , "Age":45}, "John") # CASE 2
# CREATE
#test.write_to_json({'Example2-Sub1':44},'Example2')
I think (and hope) you have some unwanted lists in your JSON, and that when you indicate you are hoping for:
{
"people": [
{"John": [{"favorite-food": "tacos", "Age": 45}]},
{"Alex": []},
{"Samantha": []}
]
}
what you really want is:
{
"people": {
"John": {"favorite-food": "tacos", "Age": 45},
"Alex": {},
"Samantha": {}
}
}
If that is what you want in the end, then this code, based on merging dictionaries via the {**a, **b} idiom, is the way forward:
import json
import os
class data_manager():
BASE_COLLECTIONS_FOLDER = "./data"
def __init__(self, collection_name):
self.collection_name = collection_name
self.collection_file_path = f"{self.BASE_COLLECTIONS_FOLDER}/{self.collection_name}.json"
self.collection = {}
self.ensure_collection()
self.load_collection()
def ensure_collection(self):
if os.path.isfile(self.collection_file_path):
return
os.makedirs(self.BASE_COLLECTIONS_FOLDER, exist_ok=True)
self.save_collection()
def load_collection(self):
with open(self.collection_file_path, "r", encoding="utf-8") as collection_file:
self.collection = json.load(collection_file)[self.collection_name]
def save_collection(self):
with open(self.collection_file_path, "w", encoding="utf-8") as collection_file:
json.dump({self.collection_name: self.collection}, collection_file, indent=4)
def write_to_json(self, data, key=None):
if not key:
self.collection = {**self.collection, **data}
else:
self.collection[key] = {**self.collection.get(key, {}), **data}
self.save_collection()
people = data_manager("people")
people.write_to_json({"John": {}, "Alex": {}, "Samantha": {}})
people.write_to_json({"Favorite-Food": "tacos", "Age":45}, "John")
people.write_to_json({"Parents": {"Mother": "Britney", "Dad": "Adam"}}, "John")
people.write_to_json({"Parents": {"Mother": "Britney", "Dad": "John"}}, "John")
people.write_to_json({"Bob": {"name": "not bob"}})
people.write_to_json({"Bob": {"name": "bob"}})
people.write_to_json({"Example2-Sub1": 44}, "Example2")
Running this will result in a file whose contents are:
{
"people": {
"John": {
"Favorite-Food": "tacos",
"Age": 45,
"Parents": {
"Mother": "Britney",
"Dad": "John"
}
},
"Alex": {},
"Samantha": {},
"Example2": {
"Example2-Sub1": 44
},
"Bob": {
"name": "bob"
}
}
}
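The heart of this approach is the merge idiom itself. As a quick standalone illustration, separate from the file handling above (the values here are made up):

existing = {"Favorite-Food": "tacos", "Age": 45}
incoming = {"Age": 46, "Parents": {"Mother": "Britney"}}

merged = {**existing, **incoming}
print(merged)
# {'Favorite-Food': 'tacos', 'Age': 46, 'Parents': {'Mother': 'Britney'}}
# Keys from the right-hand dict win on conflicts, which is what lets
# write_to_json overwrite changed values while keeping everything else.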
I have a class containing several lists as attributes and several add methods to append an object to a specific list based on its type.
My code reads a csv file containing the type of an object in order to create and add it to my cart.
My problem is that I'm testing the object type to call the right 'add' function using an if/elif chain, but this is not very nice and is hard to maintain.
For example:
import csv
class my_item():
def __init__(self, name):
self.name = name
class fruit(my_item):
pass
class vegetable(my_item):
pass
class meat(my_item):
pass
class fish(my_item):
pass
class shopping_cart():
def __init__(self):
self.fruits = []
self.vegetables = []
self.meat = []
self.fish = []
def add_fruit(self, o):
self.fruits.append(o)
def add_vegetable(self, o):
self.vegetables.append(o)
def add_meat(self, o):
self.meat.append(o)
def add_fish(self, o):
self.fish.append(o)
def __str__(self):
msg = ""
msg += "{:<25}= {:<5}\n".format('Total', str(len(self.fruits) + len(self.vegetables) + len(self.meat) + len(self.fish)))
for attrname in vars(self):
value = getattr(self, attrname)
if isinstance(value, list):
msg += " {:<23}= {:<5}\n".format(attrname, len(value))
return msg
def main():
input_f = 'input.csv'
my_cart = shopping_cart()
with open(input_f, 'r') as i:
rows = csv.reader(i, delimiter=';')
for row in rows:
item = globals()[row[0]](row[1])
if item.__class__.__name__ == 'fruit':
my_cart.add_fruit(item)
elif item.__class__.__name__ == 'vegetable':
my_cart.add_vegetable(item)
elif item.__class__.__name__ == 'meat':
my_cart.add_meat(item)
else:
my_cart.add_fish(item)
print (my_cart)
if __name__ == '__main__':
main()
Do you see any alternatives to the if elif block?
Thanks for your feedback.
Sure, you just need to build the function name dynamically and call it.
Be careful: this will only work if my_cart has an add_<item name> method.
def main():
input_f = 'input.csv'
my_cart = shopping_cart()
with open(input_f, 'r') as i:
rows = csv.reader(i, delimiter=';')
for row in rows:
item = globals()[row[0]](row[1])
item_name = item.__class__.__name__
item_add_func_name = 'add_{}'.format(item_name)
item_add_func = getattr(my_cart, item_add_func_name, None)
if item_add_func and callable(item_add_func):
item_add_func(item)
# if item.__class__.__name__ == 'fruit':
# my_cart.add_fruit(item)
# elif item.__class__.__name__ == 'vegetable':
# my_cart.add_vegetable(item)
# elif item.__class__.__name__ == 'meat':
# my_cart.add_meat(item)
# else:
# my_cart.add_fish(item)
print (my_cart)
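Another option, shown only as a sketch here (it reuses the fruit/vegetable/meat/fish classes and my_cart from the question), is an explicit dispatch table, which avoids building method names from strings:

# Hypothetical dispatch table: maps each item class to its add method.
dispatch = {
    fruit: my_cart.add_fruit,
    vegetable: my_cart.add_vegetable,
    meat: my_cart.add_meat,
    fish: my_cart.add_fish,
}

for row in rows:
    item = globals()[row[0]](row[1])
    add_func = dispatch.get(type(item))
    if add_func is not None:
        add_func(item)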
May I suggest a simpler class design.
my_item is left as it is, and the other classes fruit, vegetable, etc. are removed.
shopping_cart is modified so that self.items is a dictionary where the keys are the item types (fruit, vegetable, ...) and the values are the lists of those items.
Then the code might look as follows:
import csv
from collections import defaultdict
class my_item:
def __init__(self, name):
self.name = name
class shopping_cart:
def __init__(self):
#Dictionary to hold items
self.items = defaultdict(list)
def add_item(self, type, name):
#Append the item name to the list for this type
self.items[type].append(name)
def __str__(self):
#Iterate through the dictionary and print all key/value pairs
msg = ""
for k,v in self.items.items():
msg += ' {}: {} '.format(k, v)
return msg.strip()
sc = shopping_cart()
sc.add_item('fruit', 'pomme')
sc.add_item('vegetable', 'dinde')
sc.add_item('meat', 'carotte')
sc.add_item('fish', 'saumon')
print(sc)
The output will look like
fruit: ['pomme'] vegetable: ['dinde'] meat: ['carotte'] fish: ['saumon']
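If you still need to fill the cart from the CSV file, the same design plugs into the original reading loop without any if/elif dispatch. A sketch, assuming the shopping_cart class above and rows of the form fruit;pomme:

import csv

def load_cart(csv_path):
    cart = shopping_cart()
    with open(csv_path, newline='') as f:
        # Each row is expected to be "type;name", e.g. "fruit;pomme".
        for item_type, item_name in csv.reader(f, delimiter=';'):
            cart.add_item(item_type, item_name)
    return cart

# my_cart = load_cart('input.csv')
# print(my_cart)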
Is there a sure-fire way to check that the class of an object is a subclass of the desired superclass?
For example, in a migration script that I'm writing, I have to convert objects of a given type to dictionaries in a given manner to ensure two-way compatibility of the data.
This is best summed up like so:
Serializable
    User
    Status
    Issue
        Test
        Set
    Step
    Cycle
However, when I'm recursively checking objects after depickling, I receive a Test object that yields the following results:
Testing data object type:
type(data)
{type} <class '__main__.Test'>
Testing Class type:
type(Test())
{type} <class '__main__.Test'>
Testing object type against class type:
type(Test()) == type(data)
{bool}False
Testing if object isinstance() of Class:
isinstance(data, Test)
{bool}False
Testing if Class isinstance() of Super Class:
isinstance(Test(), Serializable)
{bool}True
Testing isinstance() of Super Class:
isinstance(data, Serializable)
{bool}False
Interestingly, it doesn't appear to have any such problem prior to pickling, as it handles the creation of the dictionary and integrity hash just fine.
This only crops up with unpickled objects, in both Pickle and Dill.
For context, here's the code in its native environment, the DataCache object that is pickled:
class DataCache(object):
_hash=""
_data = None
@staticmethod
def genHash(data):
dataDict = DataCache.dictify(data)
datahash = json.dumps(dataDict, sort_keys=True)
return hashlib.sha256(datahash).digest()
@staticmethod
def dictify(data):
if isinstance(data,list):
datahash = []
for item in data:
datahash.append(DataCache.dictify(item))
elif isinstance(data,(dict, collections.OrderedDict)):
datahash = collections.OrderedDict()
for key,value in data.iteritems():
datahash[key]= DataCache.dictify(value)
elif isinstance(data, Serializable):
datahash = data.toDict()
else:
datahash = data
return datahash
def __init__(self, restoreDict = {}):
if restoreDict:
self.__dict__.update(restoreDict)
def __getinitargs__(self):
return (self.__dict__)
def set(self, data):
self._hash = DataCache.genHash(data)
self._data = data
def verify(self):
dataHash = DataCache.genHash(self._data)
return (self._hash == dataHash)
def get(self):
return self._data
Finally, I know there are arguments for using JSON for readability in storage, but I needed Pickle's ability to convert straight to and from objects without specifying the object type myself (thanks to the nesting, that's not really feasible).
Am I going mad here or does pickling do something to the class definitions?
EDIT:
Minimal Implementation:
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import requests
from aenum import Enum
import json # _tricks
import base64
import argparse
import os
import sys
import datetime
import dill
import hashlib
import collections
class Serializable(object):
def __init__(self, initDict={}):
if initDict:
self.__dict__.update(initDict)
def __str__(self):
return str(self.sortSelf())
def sortSelf(self):
return collections.OrderedDict(sorted(self.__dict__.items()))
def toDict(self):
return self.__dict__
def fromDict(self, dict):
# Not using __dict__.update(...) to avoid polluting objects with the excess data
varMap = self.__dict__
if dict and varMap:
for key in varMap:
if (key in dict):
varMap[key] = dict[key]
self.__dict__.update(varMap)
return self
return None
class Issue(Serializable):
def __init__(self, initDict={}):
self.id = 0
self.key = ""
self.fields = {}
if initDict:
self.__dict__.update(initDict)
Serializable.__init__(self)
def fieldToDict(self, obj, key, type):
if key in obj:
result = obj[key]
else:
return None
if result is None:
return None
if isinstance(result, type):
return result.toDict()
return result
def fromDict(self, jsonDict):
super(Issue, self).fromDict(jsonDict)
self.fields["issuetype"] = IssueType().fromDict(self.fields["issuetype"])
self.fields["assignee"] = User().fromDict(self.fields["assignee"])
self.fields["creator"] = User().fromDict(self.fields["creator"])
self.fields["reporter"] = User().fromDict(self.fields["reporter"])
return self
def toDict(self):
result = super(Issue, self).toDict()
blankKeys = []
for fieldName, fieldValue in self.fields.iteritems():
if fieldValue is None:
blankKeys.append(fieldName)
if blankKeys:
for key in blankKeys:
self.fields.pop(key, None)
result["fields"]["issuetype"] = self.fieldToDict(result["fields"], "issuetype", IssueType)
result["fields"]["creator"] = self.fieldToDict(result["fields"], "creator", User)
result["fields"]["reporter"] = self.fieldToDict(result["fields"], "reporter", User)
result["fields"]["assignee"] = self.fieldToDict(result["fields"], "assignee", User)
return result
class IssueType(Serializable):
def __init__(self):
self.id = 0
self.name = ""
def toDict(self):
return {"id": str(self.id)}
class Project(Serializable):
def __init__(self):
Serializable.__init__(self)
self.id = 0
self.name = ""
self.key = ""
class Cycle(Serializable):
def __init__(self):
self.id = 0
self.name = ""
self.totalExecutions = 0
self.endDate = ""
self.description = ""
self.totalExecuted = 0
self.started = ""
self.versionName = ""
self.projectKey = ""
self.versionId = 0
self.environment = ""
self.totalCycleExecutions = 0
self.build = ""
self.ended = ""
self.name = ""
self.modifiedBy = ""
self.projectId = 0
self.startDate = ""
self.executionSummaries = {'executionSummary': []}
class Step(Serializable):
def __init__(self):
self.id = ""
self.orderId = 0
self.step = ""
self.data = ""
self.result = ""
self.attachmentsMap = {}
def toDict(self):
dict = {}
dict["step"] = self.step
dict["data"] = self.data
dict["result"] = self.result
dict["attachments"] = []
return dict
class Status(Serializable):
def __init__(self):
self.id = 0
self.name = ""
self.description = ""
self.isFinal = True
self.color = ""
self.isNative = True
self.statusCount = 0
self.statusPercent = 0.0
class User(Serializable):
def __init__(self):
self.displayName = ""
self.name = ""
self.emailAddress = ""
self.key = ""
self.active = False
self.timeZone = ""
class Execution(Serializable):
def __init__(self):
self.id = 0
self.orderId = 0
self.cycleId = -1
self.cycleName = ""
self.issueId = 0
self.issueKey = 0
self.projectKey = ""
self.comment = ""
self.versionId = 0
self.versionName = ""
self.executedOn = ""
self.creationDate = ""
self.executedByUserName = ""
self.assigneeUserName = ""
self.status = {}
self.executionStatus = ""
def fromDict(self, jsonDict):
super(Execution, self).fromDict(jsonDict)
self.status = Status().fromDict(self.status)
# This is already listed as Execution Status, need to associate and convert!
return self
def toDict(self):
result = super(Execution, self).toDict()
result['status'] = result['status'].toDict()
return result
class ExecutionContainer(Serializable):
def __init__(self):
self.executions = []
def fromDict(self, jsonDict):
super(ExecutionContainer, self).fromDict(jsonDict)
self.executions = []
for executionDict in jsonDict["executions"]:
self.executions.append(Execution().fromDict(executionDict))
return self
class Test(Issue):
def __init__(self, initDict={}):
if initDict:
self.__dict__.update(initDict)
Issue.__init__(self)
def toDict(self):
result = super(Test, self).toDict()
stepField = "CustomField_0001"
if result["fields"][stepField]:
steps = []
for step in result["fields"][stepField]["steps"]:
steps.append(step.toDict())
result["fields"][stepField] = steps
return result
def fromDict(self, jsonDict):
super(Test, self).fromDict(jsonDict)
stepField = "CustomField_0001"
steps = []
if stepField in self.fields:
for step in self.fields[stepField]["steps"]:
steps.append(Step().fromDict(step))
self.fields[stepField] = {"steps": steps}
return self
class Set(Issue):
def __init__(self, initDict={}):
self.__dict__.update(initDict)
Issue.__init__(self)
class DataCache(object):
_hash = ""
_data = None
@staticmethod
def genHash(data):
dataDict = DataCache.dictify(data)
datahash = json.dumps(dataDict, sort_keys=True)
return hashlib.sha256(datahash).digest()
@staticmethod
def dictify(data):
if isinstance(data, list):
datahash = []
for item in data:
datahash.append(DataCache.dictify(item))
elif isinstance(data, (dict, collections.OrderedDict)):
datahash = collections.OrderedDict()
for key, value in data.iteritems():
datahash[key] = DataCache.dictify(value)
elif isinstance(data, Serializable):
datahash = data.toDict()
else:
datahash = data
return datahash
def __init__(self, restoreDict={}):
if restoreDict:
self.__dict__.update(restoreDict)
def __getinitargs__(self):
return (self.__dict__)
def set(self, data):
self._hash = DataCache.genHash(data)
self._data = data
def verify(self):
dataHash = DataCache.genHash(self._data)
return (self._hash == dataHash)
def get(self):
return self._data
def saveCache(name, projectKey, object):
filePath = "migration_caches/{projectKey}".format(projectKey=projectKey)
if not os.path.exists(path=filePath):
os.makedirs(filePath)
cache = DataCache()
cache.set(object)
targetFile = open("{path}/{name}".format(name=name, path=filePath), 'wb')
dill.dump(obj=cache, file=targetFile)
targetFile.close()
def loadCache(name, projectKey):
filePath = "migration_caches/{projectKey}/{name}".format(name=name, projectKey=projectKey)
result = False
try:
targetFile = open(filePath, 'rb')
try:
cache = dill.load(targetFile)
if isinstance(cache, DataCache):
if cache.verify():
result = cache.get()
except EOFError:
# except BaseException:
print ("Failed to load cache from file: {filePath}\n".format(filePath=filePath))
except IOError:
("Failed to load cache file at: {filePath}\n".format(filePath=filePath))
targetFile.close()
return result
testIssue = Test().fromDict({"id": 1000,
"key": "TEST",
"fields": {
"issuetype": {
"id": 1,
"name": "TestIssue"
},
"assignee": "Minothor",
"reporter": "Minothor",
"creator": "Minothor",
}
})
saveCache("Test", "TestProj", testIssue)
result = loadCache("Test", "TestProj")
EDIT 2
The script in its current form now seems to work correctly with vanilla Pickle (it was initially switched to Dill due to a similar issue, which the switch solved at the time).
However, if you are here with this issue and require Dill's features, then, as Mike noted in the comments, it is possible to change the settings in dill.settings to have Dill pickle referenced items only, effectively mirroring Pickle's standard pickling behaviour.
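One way to end up with isinstance results like the ones above is for two distinct class objects to share the same name, which can happen when a module is re-imported or a class is redefined; Dill, unlike Pickle, can also serialize class definitions by value rather than by reference, which makes this easier to trigger. A minimal illustration of the effect (not taken from the original script):

class Test:
    pass

OldTest = Test        # keep a reference to the original class object

class Test:           # redefining creates a brand-new class object
    pass

data = OldTest()
print(type(data) == type(Test()))            # False -- two distinct classes
print(isinstance(data, Test))                # False -- instance of the old class
print(type(data).__name__ == Test.__name__)  # True -- the names still match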
Here's my code, the intention of which is to crawl a given folder and look for .md and .pdf files, and build a tree-like structure which describes it.
I'm probably really overthinking it, so I could really use a second set of eyes on this.
class Resource_Item:
def __init__(self, name=None, stub=None, path=None, parent=None, html_file_location=None, documents=[], children=[]):
self.name = name
self.stub = stub
self.path = path
self.parent = parent
self.html_file_location = html_file_location
self.documents = documents
self.children = children
def add_child(self, c):
self.children.append(c)
def to_json(self):
o = {
'name' : self.name,
'stub' : self.stub,
'path' : self.path,
'parent' : self.parent,
'html_file_location' : self.html_file_location,
'documents' : self.documents,
'children' : [c.to_json() for c in self.children] } #len(self.children)
return json.dumps(o)
def walk_dir(root, parent = None):
"""
>>> walk_dir("./test_docs/folder containing pdfs/").documents
['dummy_pdf 2.pdf', 'dummy_pdf 3.pdf', 'dummy_pdf 4.pdf', 'dummy_pdf.pdf']
>>> len(walk_dir("./test_docs/folder containing pdfs/").children)
0
>>> walk_dir("./test_docs/folder containing markdown and pdfs/").stub is None
False
>>> walk_dir("./test_docs/folder containing markdown and pdfs/").children
['dummy_pdf 2.pdf', 'dummy_pdf 3.pdf', 'dummy_pdf 4.pdf', 'dummy_pdf.pdf']
"""
file_or_folder_name_no_ext = os.path.splitext(os.path.basename(root))[0]
entry = Resource_Item( name=file_or_folder_name_no_ext, parent=parent, path=os.path.abspath(root) )
for item in os.listdir(root):
path = os.path.join(os.path.abspath(root), item)
if os.path.isfile(path):
if item.endswith(".pdf"):
entry.documents.append(item)
elif item.endswith(".md"):
entry.stub = read_markdown_file_as_html(path)
elif os.path.isdir(path):
if dir_contains_pdf(path):
print('found a path to contain PDFs: "'+str(path)+'"')
entry.add_child(walk_dir(path)) # broken!
#entry.add_child(path)
return entry
What appears to be happening is that on the entry.add_child(walk_dir(path)) line, walk_dir doesn't properly create a new instance of Resource_Item, since my testing shows that Resource_Item.children gets populated with all the pdfs in that file tree, not just those in the immediate folder.
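A detail worth checking in Resource_Item: it declares mutable default arguments (documents=[], children=[]), and Python evaluates default values once, at function definition time, so every instance created without explicit lists shares the same two list objects, which would produce exactly this symptom. A minimal, hypothetical illustration (not the original classes):

class Node:
    def __init__(self, children=[]):   # the same list object is reused for every call
        self.children = children

a = Node()
b = Node()
a.children.append("dummy_pdf.pdf")
print(b.children)   # ['dummy_pdf.pdf'] -- b sees a's data

class SafeNode:
    def __init__(self, children=None):
        self.children = [] if children is None else children   # fresh list per instance

c = SafeNode()
d = SafeNode()
c.children.append("dummy_pdf.pdf")
print(d.children)   # []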
As for my supporting functions, I'm pretty sure they work properly, but here they are for completeness:
def dir_contains_pdf(root):
"""
>>> dir_contains_pdf("./test_docs/folder containing pdfs/")
True
>>> dir_contains_pdf("./test_docs/folder containing nothing/")
False
>>> dir_contains_pdf("./test_docs/folder containing folders, markdown, and pdf/")
True
>>> dir_contains_pdf("./test_docs/folder containing markdown and pdfs/")
True
"""
root = os.path.abspath(root)
for item in os.listdir(root):
item_path = os.path.join(root, item)
if os.path.isfile(item_path):
if item.endswith(".pdf"):
return True
elif os.path.isdir(item_path):
if dir_contains_pdf(item_path):
return True
return False
def read_markdown_file_as_html(markdown_filename):
f = open(markdown_filename, 'r')
markdown_content = f.read()
return markdown.markdown(markdown_content)
As another view of how this recursion should be working, I built this other program in the same style to confirm that it works, and it does work properly, so I'm guessing the issue has to do with how I'm using the Python file API:
class Item:
def __init__(self, n=None):
self.n = n
self.children = []
def add_child(self, c):
self.children.append(c)
def to_o(self):
o = { 'n' : self.n, 'children' : [c.to_o() for c in self.children] }
return o
def bad(count):
item = Item(n=count)
print('count : '+str(count))
if count > 100 or count == 0:
return item
elif (count-1) % 2 == 0:
print(str(count) + ' is odd')
item.add_child(bad(count*3))
elif count % 2 == 0:
print(str(count) + ' is even')
item.add_child(bad(count/2))
return item
import json
print(json.dumps(bad(7).to_o()))
I would like to create a dict, containing several objects of the same class. Each object must be independent.
Something like:
#!/usr/bin/python3
class myReserve():
myList = dict()
def __init__(self, initName):
self.myName = initName
self.setList()
def setList(self):
if self.myName == "fruit":
self.myList[0] = "Orange"
self.myList[1] = "Lemon"
elif self.myName == "vegetable":
self.myList[0] = "Tomato"
self.myList[1] = "Carrot"
#If neither fruit nor vegetable
#myList should be empty.
myStore = dict()
myStore[0] = myReserve("fruit")
myStore[1] = myReserve("vegetable")
myStore[2] = myReserve("spices")
print(myStore[0].myList)
This prints:
{0: 'Tomato', 1: 'Carrot'}
I thought it would print:
{0: 'Orange', 1: 'Lemon'}
I understood objects are passed by reference in Python.
dict1 = {"var": 128}
dict2 = dict1
dict2["var"] = 0
print(dict1["var"])
Will print:
0
By creating a class I wanted to create a structure for different objects, but I don't understand the behaviour of the first code example. Is it possible to do something like this in a Pythonic way?
Your problem is that you're defining myList on the class level, so that it's shared by every instance of myReserve. Try defining it in myReserve.__init__ instead:
class myReserve():
def __init__(self, initName):
self.myList = dict()
Full code:
#!/usr/bin/python3
class myReserve():
def __init__(self, initName):
self.myList = dict()
self.myName = initName
self.setList()
def setList(self):
if self.myName == "fruit":
self.myList[0] = "Orange"
self.myList[1] = "Lemon"
elif self.myName == "vegetable":
self.myList[0] = "Tomato"
self.myList[1] = "Carrot"
#If neither fruit nor vegetable
#myList should be empty.
myStore = dict()
myStore[0] = myReserve("fruit")
myStore[1] = myReserve("vegetable")
myStore[2] = myReserve("spices")
print(myStore[0].myList)
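With that change each instance gets its own dictionary, so the first example prints {0: 'Orange', 1: 'Lemon'} as expected. The same pattern in isolation, as a quick standalone sketch of class attributes versus instance attributes:

class Shared:
    data = {}              # class attribute: one dict shared by all instances

class Independent:
    def __init__(self):
        self.data = {}     # instance attribute: a fresh dict per instance

a, b = Shared(), Shared()
a.data["x"] = 1
print(b.data)              # {'x': 1} -- both names refer to the same dict

c, d = Independent(), Independent()
c.data["x"] = 1
print(d.data)              # {} -- each instance has its own dict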