Shortest distance algorithm Python

I want to create a simple breadth-first search algorithm that returns the length of the shortest path.
An actor information dictionary maps an actor to the list of movies the actor appears in:
actor_info = {"act1": ["movieC", "movieA"], "act2": ["movieA", "movieB"],
              "act3": ["movieA", "movieB"], "act4": ["movieC", "movieD"],
              "act5": ["movieD", "movieB"], "act6": ["movieE"],
              "act7": ["movieG", "movieE"], "act8": ["movieD", "movieF"],
              "KevinBacon": ["movieF"], "act10": ["movieG"], "act11": ["movieG"]}
The inverse of this maps movies to the list of actors appearing in them:
movie_info = {'movieB': ['act2', 'act3', 'act5'], 'movieC': ['act1', 'act4'],
              'movieA': ['act1', 'act2', 'act3'], 'movieF': ['KevinBacon', 'act8'],
              'movieG': ['act7', 'act10', 'act11'], 'movieD': ['act8', 'act4', 'act5'],
              'movieE': ['act6', 'act7']}
so for a call
shortest_distance("act1", "KevinBacon", actor_info, movie_info)
I should get 3, since act1 appears in movieC with act4, who appears in movieD with act8, who appears in movieF with KevinBacon. So the shortest distance is 3.
So far I have this:
def shortest_distance(actA, actB, actor_info, movie_info):
    '''Return the number of movies required to connect actA and actB.
    If there's no connection, return -1.'''
    # So we keep 2 lists of actors:
    # 1. The actors that we have already investigated.
    # 2. The actors that need to be investigated because we have found a
    #    connection beginning at actA. This list must be ordered, since we
    #    want to investigate actors in the order we discover them.
    #    -- Each time we put an actor in this list, we also store
    #       her distance from actA.
    investigated = []
    to_investigate = [actA]
    distance = 0
    while actB not in to_investigate and to_investigate != []:
        for actor in to_investigate:
            to_investigate.remove(actor)
            investigated.append(actor)
            for movie in actor_info[actor]:
                for co_star in movie_info[movie]:
                    if co_star not in investigated and co_star not in to_investigate:
                        to_investigate.append(co_star)
        ....
        ....
    return d
I can't figure out an appropriate way to keep track of the distances discovered on each iteration of the code. Also, the code seems to be very inefficient time-wise.
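One standard way to do that bookkeeping (a minimal sketch of the idea, not a finished version of the code above) is to let the queue carry (actor, distance) pairs, so every discovered actor remembers how far from actA it was found:

from collections import deque

def shortest_distance_sketch(actA, actB, actor_info, movie_info):
    """Minimal BFS sketch: the queue holds (actor, distance) pairs."""
    if actA == actB:
        return 0
    investigated = set()
    queue = deque([(actA, 0)])         # each entry carries its own distance
    while queue:
        actor, dist = queue.popleft()  # FIFO order gives breadth-first search
        if actor in investigated:
            continue
        investigated.add(actor)
        for movie in actor_info[actor]:
            for co_star in movie_info[movie]:
                if co_star == actB:
                    return dist + 1
                if co_star not in investigated:
                    queue.append((co_star, dist + 1))
    return -1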

First, create one graph connecting all the nodes, then run the shortest-path code (an efficient graph library could do this instead of the function below, but this one is elegant), and finally count the movie names along the shortest path.
for i in movie_info:
    actor_info[i] = movie_info[i]

def find_shortest_path(graph, start, end, path=[]):
    path = path + [start]
    if start == end:
        return path
    if not start in graph:
        return None
    shortest = None
    for node in graph[start]:
        if node not in path:
            newpath = find_shortest_path(graph, node, end, path)
            if newpath:
                if not shortest or len(newpath) < len(shortest):
                    shortest = newpath
    return shortest

L = find_shortest_path(actor_info, 'act1', 'act2')
print len([i for i in L if i in movie_info])
find_shortest_path Source: http://www.python.org/doc/essays/graphs/
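As an example of the "efficient graph library" route mentioned above, here is a sketch assuming the networkx library is available (not part of the original answer): build the same merged actor/movie graph and let the library do the search.

import networkx as nx

# Build one bipartite graph: actors on one side, movies on the other.
G = nx.Graph()
for actor, movies in actor_info.items():
    for movie in movies:
        G.add_edge(actor, movie)

path = nx.shortest_path(G, "act1", "KevinBacon")
print(len([node for node in path if node in movie_info]))  # movie count: 3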

This looks like it works. It keeps track of a current set of movies. For each step, it looks at all of the one-step-away movies which haven't already been considered ("seen").
actor_info = {"act1": ["movieC", "movieA"], "act2": ["movieA", "movieB"],
              "act3": ["movieA", "movieB"], "act4": ["movieC", "movieD"],
              "act5": ["movieD", "movieB"], "act6": ["movieE"],
              "act7": ["movieG", "movieE"], "act8": ["movieD", "movieF"],
              "KevinBacon": ["movieF"], "act10": ["movieG"], "act11": ["movieG"]}

movie_info = {'movieB': ['act2', 'act3', 'act5'], 'movieC': ['act1', 'act4'],
              'movieA': ['act1', 'act2', 'act3'], 'movieF': ['KevinBacon', 'act8'],
              'movieG': ['act7', 'act10', 'act11'], 'movieD': ['act8', 'act4', 'act5'],
              'movieE': ['act6', 'act7']}

def shortest_distance(actA, actB, actor_info, movie_info):
    if actA not in actor_info:
        return -1  # "infinity"
    if actB not in actor_info:
        return -1  # "infinity"
    if actA == actB:
        return 0
    dist = 1
    movies = set(actor_info[actA])
    end_movies = set(actor_info[actB])
    if movies & end_movies:
        return dist
    seen = movies.copy()
    print "All movies with", actA, seen
    while 1:
        dist += 1
        next_step = set()
        for movie in movies:
            for actor in movie_info[movie]:
                next_step.update(actor_info[actor])
        print "Movies with actors from those movies", next_step
        movies = next_step - seen
        print "New movies with actors from those movies", movies
        if not movies:
            return -1  # "Infinity"
        # Has actorB been in any of those movies?
        if movies & end_movies:
            return dist
        # Update the set of seen movies, so I don't visit them again
        seen.update(movies)

if __name__ == "__main__":
    print shortest_distance("act1", "KevinBacon", actor_info, movie_info)
The output is
All movies with act1 set(['movieC', 'movieA'])
Movies with actors from those movies set(['movieB', 'movieC', 'movieA', 'movieD'])
New movies with actors from those movies set(['movieB', 'movieD'])
Movies with actors from those movies set(['movieB', 'movieC', 'movieA', 'movieF', 'movieD'])
New movies with actors from those movies set(['movieF'])
3
Here's a version which returns the list of movies making up the minimum connection (None for no connection, and an empty list if actA and actB are the same):
def connect(links, movie):
    chain = []
    while movie is not None:
        chain.append(movie)
        movie = links[movie]
    return chain

def shortest_distance(actA, actB, actor_info, movie_info):
    if actA not in actor_info:
        return None  # "infinity"
    if actB not in actor_info:
        return None  # "infinity"
    if actA == actB:
        return []
    # {x: y} means that x is one link outwards from y
    links = {}
    # Start from the destination and work backward
    for movie in actor_info[actB]:
        links[movie] = None
    dist = 1
    movies = links.keys()
    while 1:
        new_movies = []
        for movie in movies:
            for actor in movie_info[movie]:
                if actor == actA:
                    return connect(links, movie)
                for other_movie in actor_info[actor]:
                    if other_movie not in links:
                        links[other_movie] = movie
                        new_movies.append(other_movie)
        if not new_movies:
            return None  # Infinity
        movies = new_movies

if __name__ == "__main__":
    dist = shortest_distance("act1", "KevinBacon", actor_info, movie_info)
    if dist is None:
        print "Not connected"
    else:
        print "The Kevin Bacon Number for act1 is", len(dist)
        print "Movies are:", ", ".join(dist)
Here's the output:
The Kevin Bacon Number for act1 is 3
Movies are: movieC, movieD, movieF

Related

want to add left out string in matched string

Below is my example code:
from fuzzywuzzy import fuzz
import json
from itertools import zip_longest

synonyms = open("synonyms.json", "r")
synonyms = json.loads(synonyms.read())
vendor_data = ["i7 processor", "solid state", "Corei5 :1135G7 (11th Generation)",
               "hard drive", "ddr 8gb", "something1", "something2",
               "something3", "HT (100W) DDR4-2400"]
buyer_data = ["i7 processor 12 generation", "corei7:latest technology"]
vendor = []
buyer = []
for item, value in synonyms.items():
    for k, k2 in zip_longest(vendor_data, buyer_data):
        for v in value:
            if fuzz.token_set_ratio(k, v) > 70:
                if item in k:
                    vendor.append(k)
                else:
                    vendor.append(item + " " + k)
            else:
                # didn't get the "something" strings here!
                if fuzz.token_set_ratio(k2, v) > 70:
                    if item in k2:
                        buyer.append(k2)
                    else:
                        buyer.append(item + " " + k2)
vendor = list(set(vendor))
buyer = list(set(buyer))
vendor, buyer
Note: "something" string can be anything like "battery" or "display"etc
synonyms.json:
{
    "processor": ["corei5", "core", "corei7", "i5", "i7", "ryzen5", "i5 processor",
                  "i7 processor", "processor i5", "processor i7", "core generation",
                  "core gen"],
    "ram": ["DDR4", "memory", "DDR3", "DDR", "DDR 8gb", "DDR 8 gb", "DDR 16gb",
            "DDR 16 gb", "DDR 32gb", "DDR 32 gb", "DDR4-"],
    "ssd": ["solid state drive", "solid drive"],
    "hdd": ["Hard Drive"]
}
What do I need?
I want to add all the "something" strings to the vendor list dynamically.
NOTE: the "something" string can be anything in the future.
I want to add the "something" strings, which do not match anything with fuzz > 70, to the vendor list as well; basically, I want to keep the left-out data too.
for example like below:
current output
['processor Corei5 :1135G7 (11th Generation)',
'i7 processor',
'ram HT (100W) DDR4-2400',
'ram ddr 8gb',
'hdd hard drive',
'ssd solid state']
expected output below
['processor Corei5 :1135G7 (11th Generation)',
'i7 processor',
'ram HT (100W) DDR4-2400',
'ram ddr 8gb',
'hdd hard drive',
'ssd solid state',
'something1',
'something2',
'something3'] #something string need to be added in vendor list dynamically.
What silly mistake am I making? Thank you.
Here's my attempt:
from fuzzywuzzy import process, fuzz

synonyms = {'processor': ['corei5', 'core', 'corei7', 'i5', 'i7', 'ryzen5', 'i5 processor', 'i7 processor', 'processor i5', 'processor i7', 'core generation', 'core gen'], 'ram': ['DDR4', 'memory', 'DDR3', 'DDR', 'DDR 8gb', 'DDR 8 gb', 'DDR 16gb', 'DDR 16 gb', 'DDR 32gb', 'DDR 32 gb', 'DDR4-'], 'ssd': ['solid state drive', 'solid drive'], 'hdd': ['Hard Drive']}
vendor_data = ['i7 processor', 'solid state', 'Corei5 :1135G7 (11th Generation)', 'hard drive', 'ddr 8gb', 'something1', 'something2', 'something3', 'HT (100W) DDR4-2400']
buyer_data = ['i7 processor 12 generation', 'corei7:latest technology']

def find_synonym(s: str, min_score: int = 60):
    results = process.extractBests(s, choices=synonyms, score_cutoff=min_score)
    if not results:
        return None
    return results[0][-1]

def process_data(l: list, min_score: int = 60):
    matches = []
    no_matches = []
    for item in l:
        syn = find_synonym(item, min_score=min_score)
        if syn is not None:
            new_item = f'{syn} {item}' if syn not in item else item
            matches.append(new_item)
        elif any(fuzz.partial_ratio(s, item) >= min_score for s in synonyms.keys()):
            # one of the synonyms is already in the item string
            matches.append(item)
        else:
            no_matches.append(item)
    return matches, no_matches
For process_data(vendor_data) we get:
(['i7 processor',
'ssd solid state',
'processor Corei5 :1135G7 (11th Generation)',
'hdd hard drive',
'ram ddr 8gb',
'ram HT (100W) DDR4-2400'],
['something1', 'something2', 'something3'])
And for process_data(buyer_data):
(['i7 processor 12 generation', 'processor corei7:latest technology'], [])
I had to lower the cut-off score to 60 to also get results for ddr 8gb. The process_data function returns 2 lists: one with matches from the synonyms dict, and one with the items that had no match. If you want exactly the output you listed in your question, just concatenate the two lists like this:
matches, no_matches = process_data(vendor_data)
matches + no_matches # ['i7 processor', 'ssd solid state', 'processor Corei5 :1135G7 (11th Generation)', 'hdd hard drive', 'ram ddr 8gb', 'ram HT (100W) DDR4-2400', 'something1', 'something2', 'something3']
I have tried to come up with a decent answer (certainly not the cleanest one)
import json
from itertools import zip_longest
from fuzzywuzzy import fuzz

synonyms = open("synonyms.json", "r")
synonyms = json.loads(synonyms.read())
vendor_data = ["i7 processor", "solid state", "Corei5 :1135G7 (11thGeneration)",
               "hard drive", "ddr 8gb", "something1", "something2",
               "something3", "HT (100W) DDR4-2400"]
buyer_data = ["i7 processor 12 generation", "corei7:latest technology"]
vendor = []
buyer = []
for k, k2 in zip_longest(vendor_data, buyer_data):
    has_matched = False
    for item, value in synonyms.items():
        for v in value:
            if fuzz.token_set_ratio(k, v) > 70:
                if item in k:
                    vendor.append(k)
                else:
                    vendor.append(item + " " + k)
                if has_matched or k2 is None:
                    break
                else:
                    has_matched = True
            if fuzz.token_set_ratio(k2, v) > 70:
                if item in k2:
                    buyer.append(k2)
                else:
                    buyer.append(item + " " + k2)
                if has_matched or k is None:
                    break
                else:
                    has_matched = True
        else:
            continue  # match not found
        break  # match is found
    else:  # only evaluates on normal loop end
        # Only "something" strings
        # do something with the new input values
        continue
vendor = list(set(vendor))
buyer = list(set(buyer))
I hope you can achieve what you want with this code. Check the docs if you don't know what a for-else loop does. TL;DR: the else clause executes when the loop terminates normally (not with a break). Note that I put the synonyms loop inside the data loop, because we can't know in advance which synonym group an entry belongs to; sometimes the vendor data entry is a processor while the buyer data is memory. Also note that I have assumed an item can't match more than once. If it could, you would need a more advanced check (just keep a counter and break when the counter equals 2, for example), as sketched below.
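A minimal sketch of that counter idea for the vendor side (it reuses the names defined in the snippet above; the limit of 2 is just an example, not a requirement):

max_matches = 2  # assumed limit: an entry belongs to at most two synonym groups
for k in vendor_data:
    match_count = 0
    for item, value in synonyms.items():
        if any(fuzz.token_set_ratio(k, v) > 70 for v in value):
            vendor.append(k if item in k else item + " " + k)
            match_count += 1
            if match_count == max_matches:
                break  # stop once the limit is reached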
EDIT:
I took another look at the question and came up with maybe a better answer:
v_dict = dict()
for spec in vendor_data[:]:
    for item, choices in synonyms.items():
        if process.extractOne(spec, choices)[1] > 70:  # don't forget to import process from fuzzywuzzy
            v_dict[spec] = item
            break
    else:
        v_dict[spec] = "Something new"
This code matches the strings to the correct type, for example: {'i7 processor': 'processor', 'solid state': 'ssd', 'Corei5 :1135G7 (11thGeneration)': 'processor', 'hard drive': 'ssd', 'ddr 8gb': 'ram', 'something1': 'Something new', 'something2': 'Something new', 'something3': 'Something new', 'HT (100W) DDR4-2400': 'ram'}. You can change "Something new" to whatever you like. You could also do v_dict[spec] = 0 (on a match) and v_dict[spec] = 1 (on no match). You could then sort the dict:
# Relies on keys() and values() iterating in the same order:
# each key is effectively sorted by its own value.
it = iter(v_dict.values())
print(sorted(v_dict.keys(), key=lambda x: next(it)))
Which would give the wanted results (more or less): all the recognised items come first, then all the unrecognised items. You could do some more advanced sorting on this dict if you want (a simpler equivalent of the same sort is sketched below). I think this code gives you enough flexibility to reach your goal.
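For what it's worth, the same ordering can be expressed without the iterator trick (a sketch, assuming the values are mutually comparable, e.g. the 0/1 scheme above):

# Sort the keys directly by their own value; 0 (match) sorts before 1 (no match).
print(sorted(v_dict, key=v_dict.get))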
If I understand correctly, what you are trying to do is match keywords specified by a customer and/or vendor against a predefined database of keywords you have.
First, I would highly recommend using a reversed mapping of the synonyms, so it's faster to lookup, especially when the dataset will grow.
Second, considering the fuzzywuzzy API, it looks like you simply want the best match, so extractOne is a solid choice for that.
Now, extractOne returns the best match and a score:
>>> process.extractOne("cowboys", choices)
("Dallas Cowboys", 90)
I would split the algorithm in two:
1. A generic part that simply gets the best match, which should always exist (even if it's not a great one).
2. A filter, where you could adjust the sensitivity of the algorithm based on different criteria of your application. This sensitivity threshold should set the minimal match quality; if you're below this threshold, just use "untagged" for the category, for example.
Here is the final code, which I think is very simple and easy to understand and expand:
import json
from fuzzywuzzy import process

def load_synonyms():
    with open('synonyms.json') as fin:
        synonyms = json.load(fin)
    # Reversing the map makes it much easier to look up
    reversed_synonyms = {}
    for key, values in synonyms.items():
        for value in values:
            reversed_synonyms[value] = key
    return reversed_synonyms

def load_vendor_data():
    return [
        "i7 processor",
        "solid state",
        "Corei5 :1135G7 (11thGeneration)",
        "hard drive",
        "ddr 8gb",
        "something1",
        "something2",
        "something3",
        "HT (100W) DDR4-2400"
    ]

def load_customer_data():
    return [
        "i7 processor 12 generation",
        "corei7:latest technology"
    ]

def get_tag(keyword, synonyms):
    THRESHOLD = 80
    DEFAULT = 'general'
    tag, score = process.extractOne(keyword, synonyms.keys())
    return synonyms[tag] if score > THRESHOLD else DEFAULT

def main():
    synonyms = load_synonyms()
    customer_data = load_customer_data()
    vendor_data = load_vendor_data()
    data = customer_data + vendor_data
    tags_dict = {keyword: get_tag(keyword, synonyms) for keyword in data}
    print(json.dumps(tags_dict, indent=4))

if __name__ == '__main__':
    main()
When running with the specified inputs, the output is:
{
    "i7 processor 12 generation": "processor",
    "corei7:latest technology": "processor",
    "i7 processor": "processor",
    "solid state": "ssd",
    "Corei5 :1135G7 (11thGeneration)": "processor",
    "hard drive": "hdd",
    "ddr 8gb": "ram",
    "something1": "general",
    "something2": "general",
    "something3": "general",
    "HT (100W) DDR4-2400": "ram"
}

Finding relationships between values based on their name in Python with Pandas

I want to make relationships between values by their Name, based on the rules below:
1- I have a CSV file (with more than 100,000 rows) that consists of lots of values; I share some examples below:
Name:
A02-father
A03-father
A04-father
A05-father
A07-father
A08-father
A09-father
A17-father
A18-father
A20-father
A02-SA-A03-SA
A02-SA-A04-SA
A03-SA-A02-SA
A03-SA-A05-SA
A03-SA-A17-SA
A04-SA-A02-SA
A04-SA-A09-SA
A05-SA-A03-SA
A09-SA-A04-SA
A09-SA-A20-SA
A17-SA-A03-SA
A17-SA-A18-SA
A18-SA-A17-SA
A20-SA-A09-SA
A05-NA
B02-Father
B04-Father
B06-Father
B02-SA-B04-SA
B04-SA-BO2-SA
B04-SA-B06-SA
B06-SA-B04-SA
B06-NA
2- Now I have another CSV file which tells me which value I should start from. In this case the values are
A03-father & B02-father & ...; they don't have any influence on each other and each has a separate path to follow, so for each path we start from its own start point.
father.csv
A03-father
B02-father
....
3- Based on the naming I want to make the relationships. As A03-father has been determined as a father, I should check for any value which starts with A03 (all of them are A03's babies).
Also, as B02 is a father, we will check for any value which starts with B02 (B02-SA-B04-SA).
4- Now if I find A03-SA-A02-SA, this is A03's baby.
If I find A03-SA-A05-SA, this is A03's baby.
If I find A03-SA-A17-SA, this is A03's baby.
After that I must check any node which starts with A02 & A05 & A17:
As you see, A02-father exists, so it is a father, and now we will search for any string which starts with A02 and doesn't contain A03, which has already been detected as a father (it must be ignored).
This must be checked till the end of the values which exist in the CSV file.
As you see, I should check the path based on the name (regex) and go forward till the end of the path.
The expected result:
Father Baby
A03-father A03-SA-A02-SA
A03-father A03-SA-A05-SA
A03-father A03-SA-A17-SA
A02-father A02-SA-A04-SA
A05-father A05-NA
A17-father A17-SA-A18-SA
A04-father A04-SA-A09-SA
A02-father A02-SA-A04-SA
A09-father A09-SA-A20-SA
B02-father B02-SA-B04-SA
B04-father B04-SA-B06-SA
B06-father B06-NA
I have coded it as below with pandas:
import pandas as pd
import numpy as np
import re

# Read the file which consists of all values
df = pd.read_csv("C:\\total.csv")
# Read the file which tells me who is a father
Fa = pd.read_csv("C:\\Father.csv")
# Get the first part of Father, which is A0
Fa['sub'] = Fa['Name'].str.extract(r'(\w+\s*)', expand=False)
r2 = []
# Check the whole CSV file and find anything which starts with A0 and is not a father
for f in Fa['sub']:
    baby = (df[df['Name'].str.startswith(f) & ~df['Name'].str.contains('Father')])
    baby['sub'] = baby['Name'].str.extract(r'(\w+\s*)', expand=False)
    r1 = pd.merge(Fa, baby, left_on='sub', right_on='sub', suffixes=('_f', '_c'))
    r2.append(r1)
out_df = pd.concat(r2)
out_df = out_df.replace(np.nan, '', regex=True)
# Find A0-N-A2-M and A0-N-A4-M
out_df.to_csv('C:\\child1.csv')
# Check the whole CSV file and find anything which starts with the second part of child1, which is A2 and A4
out_df["baby2"] = out_df['Name_baby'].str.extract(r'^(?:[^-]*-){2}\s*([^-]+)', expand=False)
baby3 = out_df["baby2"]
r4 = []
for f in out_df["baby2"]:
    # I want to exclude A0, which has already been detected.
    l = ['A0']
    regstr = '|'.join(l)
    baby1 = (df[df['Name'].str.startswith(f) & ~df['Name'].str.contains(regstr)])
    baby1['sub'] = baby1['Name'].str.extract(r'(\w+\s*)', expand=False)
    r3 = pd.merge(baby3, baby1, left_on='baby2', right_on='sub', suffixes=('_f', '_c'))
    r4.append(r3)
out2_df = pd.concat(r4)
out2_df.to_csv('C:\\child2.csv')
I want to put the code below in a loop that goes through the file, checks it based on the naming process, and detects the other fathers and babies until it is finished. However, this code is not generalized and doesn't give exactly the result I expected.
My question is about how to make the loop.
I should go through the path and also consider the regstr value for any string.
# Check the whole CSV file and find anything which starts with the second part of child1, which is A2 and A4
out_df["baby2"] = out_df['Name_baby'].str.extract(r'^(?:[^-]*-){2}\s*([^-]+)', expand=False)
baby3 = out_df["baby2"]
r4 = []
for f in out_df["baby2"]:
    # I want to exclude A0, which has already been detected.
    l = ['A0']
    regstr = '|'.join(l)
    baby1 = (df[df['Name'].str.startswith(f) & ~df['Name'].str.contains(regstr)])
    baby1['sub'] = baby1['Name'].str.extract(r'(\w+\s*)', expand=False)
    r3 = pd.merge(baby3, baby1, left_on='baby2', right_on='sub', suffixes=('_f', '_c'))
    r4.append(r3)
out2_df = pd.concat(r4)
out2_df.to_csv('C:\\child2.csv')
Start with import collections (it will be needed soon).
I assume that you have already read the df and Fa DataFrames.
The first part of my code creates a children Series (index: parent,
value: child):
isFather = df.Name.str.contains('-father', case=False)
dfChildren = df[~isFather]
key = []; val = []
for fath in df[isFather].Name:
    prefix = fath.split('-')[0]
    for child in dfChildren[dfChildren.Name.str.startswith(prefix)].Name:
        key.append(prefix)
        val.append(child)
children = pd.Series(val, index=key)
Print children to see the result.
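For the sample data above, the head of children should look roughly like this (reconstructed from the sample, abbreviated):

A02    A02-SA-A03-SA
A02    A02-SA-A04-SA
A03    A03-SA-A02-SA
A03    A03-SA-A05-SA
A03    A03-SA-A17-SA
...
B06    B06-NA
dtype: object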
The second part creates the actual result, starting from each
starting point in Fa:
nodes = collections.deque()
father = []; baby = []  # Containers for source data
# Loop over each starting point
for startNode in Fa.Name.str.split('-', expand=True)[0]:
    nodes.append(startNode)
    while nodes:
        node = nodes.popleft()  # Take a node name from the queue
        # Children of this node
        myChildren = children[children.index == node]
        # Process children (ind - father, val - child)
        for ind, val in myChildren.items():
            parts = val.split('-')  # Parts of child name
            # Child "actual" name (if it exists)
            val_2 = parts[2] if len(parts) >= 3 else ''
            if val_2 not in father:  # val_2 not "visited" before
                # Add father / child name to containers
                father.append(ind)
                baby.append(val)
                if len(val_2) > 0:
                    nodes.append(val_2)  # Add to the queue, to be processed later
        # Drop rows for "node" from "children" (if any exist)
        if (children.index == node).sum() > 0:
            children.drop(node, inplace=True)
# Convert to a DataFrame
result = pd.DataFrame({'Father': father, 'Baby': baby})
result.Father += '-father'  # Add "-father" to "bare" names
I added -father with a lowercase "f", but I think this is not a significant detail.
The result, for your data sample, is:
Father Baby
0 A03-father A03-SA-A02-SA
1 A03-father A03-SA-A05-SA
2 A03-father A03-SA-A17-SA
3 A02-father A02-SA-A04-SA
4 A05-father A05-NA
5 A17-father A17-SA-A18-SA
6 A04-father A04-SA-A09-SA
7 A09-father A09-SA-A20-SA
8 B02-father B02-SA-B04-SA
9 B04-father B04-SA-B06-SA
10 B06-father B06-NA
And two remarks concerning your data sample:
You wrote B04-SA-BO2-SA with a capital O (a letter) instead of 0 (zero). I corrected it in my source data.
The row A02-father A02-SA-A04-SA in your expected result is doubled. I assume it should occur only once.
Commented inline
def find(data, from_pos=0):
    fathers = {}
    skip = []
    for x in data[from_pos:]:
        tks = x.split("-")
        # Is it a father?
        if tks[1].lower() == "father":
            fathers[tks[0]] = x
        else:
            if tks[0] in fathers and tks[-2] not in skip:
                print(fathers[tks[0]], x)
                # Skip this father appearing as a child later
                skip.append(tks[0])
Testcase:
data = [
    'A0-Father',
    'A0-N-A2-M',
    'A0-N-A4-M',
    'A2-Father',
    'A2-M-A0-N',
    'A2-N-A8-M',
    'A8-father',
    'A8-M-A11-N',
    'A8-M-A2-N']

find(data, from_pos=0)
Output:
A0-Father A0-N-A2-M
A0-Father A0-N-A4-M
A2-Father A2-N-A8-M
A8-father A8-M-A11-N
Edit 1:
Start with some data for testing
data = [
'A02-father',
'A03-father',
'A04-father',
'A05-father',
'A07-father',
'A08-father',
'A09-father',
'A17-father',
'A18-father',
'A20-father',
'A02-SA-A03-SA',
'A02-SA-A04-SA',
'A03-SA-A02-SA',
'A03-SA-A05-SA',
'A03-SA-A17-SA',
'A04-SA-A02-SA',
'A04-SA-A09-SA',
'A05-SA-A03-SA',
'A09-SA-A04-SA',
'A09-SA-A20-SA',
'A17-SA-A03-SA',
'A17-SA-A18-SA',
'A18-SA-A17-SA',
'A20-SA-A09-SA',
'A05-NA',
]
father = [
'A03-father',
]
First, let us make a data structure so that manipulation will be easy and relationship lookups will be fast, since you have huge data:
def make_data_structure(data):
    all_fathers, all_relations = {}, {}
    for x in data:
        tks = x.split("-")
        if tks[1].lower() == "father":
            all_fathers[tks[0]] = x
        else:
            if len(tks) == 2:
                tks.extend(['NA', 'NA'])
            if tks[0] in all_relations:
                all_relations[tks[0]][0].append(tks[-2])
                all_relations[tks[0]][1].append(x)
            else:
                all_relations[tks[0]] = [[tks[-2]], [x]]
    return all_fathers, all_relations

all_fathers, all_relations = make_data_structure(data)
all_fathers, all_relations
Output:
{'A02': 'A02-father',
'A03': 'A03-father',
'A04': 'A04-father',
'A05': 'A05-father',
'A07': 'A07-father',
'A08': 'A08-father',
'A09': 'A09-father',
'A17': 'A17-father',
'A18': 'A18-father',
'A20': 'A20-father'},
{'A02': [['A03', 'A04'], ['A02-SA-A03-SA', 'A02-SA-A04-SA']],
'A03': [['A02', 'A05', 'A17'],
['A03-SA-A02-SA', 'A03-SA-A05-SA', 'A03-SA-A17-SA']],
'A04': [['A02', 'A09'], ['A04-SA-A02-SA', 'A04-SA-A09-SA']],
'A05': [['A03', 'NA'], ['A05-SA-A03-SA', 'A05-NA']],
'A09': [['A04', 'A20'], ['A09-SA-A04-SA', 'A09-SA-A20-SA']],
'A17': [['A03', 'A18'], ['A17-SA-A03-SA', 'A17-SA-A18-SA']],
'A18': [['A17'], ['A18-SA-A17-SA']],
'A20': [['A09'], ['A20-SA-A09-SA']]}
As you can see, all_fathers holds all the parents and, most importantly, all_relations holds the father-child relationships, which can be indexed by the father for faster lookups.
Now let's do the actual parsing of the relationships:
def find(all_fathers, all_relations, from_father):
    fathers = [from_father]
    skip = []
    while True:
        if len(fathers) == 0:
            break
        current_father = fathers[0]
        fathers = fathers[1:]
        for i in range(len(all_relations[current_father][0])):
            if not all_relations[current_father][0][i] in skip:
                print(all_fathers[current_father], all_relations[current_father][1][i])
                if all_relations[current_father][0][i] != 'NA':
                    fathers.append(all_relations[current_father][0][i])
        skip.append(current_father)

for x in father:
    find(all_fathers, all_relations, x.split("-")[0])
Output:
A03-father A03-SA-A02-SA
A03-father A03-SA-A05-SA
A03-father A03-SA-A17-SA
A02-father A02-SA-A04-SA
A05-father A05-NA
A17-father A17-SA-A18-SA
A04-father A04-SA-A09-SA
A09-father A09-SA-A20-SA
Edit 2:
New test cases. (You will have to load the values in father.csv into a list called father.)
data = [
'A02-father',
'A03-father',
'A04-father',
'A05-father',
'A07-father',
'A08-father',
'A09-father',
'A17-father',
'A18-father',
'A20-father',
'A02-SA-A03-SA',
'A02-SA-A04-SA',
'A03-SA-A02-SA',
'A03-SA-A05-SA',
'A03-SA-A17-SA',
'A04-SA-A02-SA',
'A04-SA-A09-SA',
'A05-SA-A03-SA',
'A09-SA-A04-SA',
'A09-SA-A20-SA',
'A17-SA-A03-SA',
'A17-SA-A18-SA',
'A18-SA-A17-SA',
'A20-SA-A09-SA',
'A05-NA',
'B02-Father',
'B04-Father',
'B06-Father',
'B02-SA-B04-SA',
'B04-SA-B02-SA',
'B04-SA-B06-SA',
'B06-SA-B04-SA',
'B06-NA',
]
father = [
'A03-father',
'B02-father'
]
all_fathers, all_relations = make_data_structure(data)  # rebuild the lookups with the new data

for x in father:
    find(all_fathers, all_relations, x.split("-")[0])
Output:
A03-father A03-SA-A02-SA
A03-father A03-SA-A05-SA
A03-father A03-SA-A17-SA
A02-father A02-SA-A04-SA
A05-father A05-NA
A17-father A17-SA-A18-SA
A04-father A04-SA-A09-SA
A09-father A09-SA-A20-SA
B02-Father B02-SA-B04-SA
B04-Father B04-SA-B06-SA
B06-Father B06-NA

Search in List; Display names based on search input

I have looked at different articles here about searching data in a list, but nothing seems to work right or fit what I am supposed to implement.
I have this pre-created module with over 500 records (they are strings, yes, but are treated as a list when passed into the function; see code below) of names, city, email, etc. The following is just a chunk of it.
empRecords="""Jovita,Oles,8 S Haven St,Daytona Beach,Volusia,FL,6/14/1965,32114,386-248-4118,386-208-6976,joles#gmail.com,http://www.paganophilipgesq.com,;
Alesia,Hixenbaugh,9 Front St,Washington,District of Columbia,DC,3/3/2000,20001,202-646-7516,202-276-6826,alesia_hixenbaugh#hixenbaugh.org,http://www.kwikprint.com,;
Lai,Harabedian,1933 Packer Ave #2,Novato,Marin,CA,1/5/2000,94945,415-423-3294,415-926-6089,lai#gmail.com,http://www.buergimaddenscale.com,;
Brittni,Gillaspie,67 Rv Cent,Boise,Ada,ID,11/28/1974,83709,208-709-1235,208-206-9848,bgillaspie#gillaspie.com,http://www.innerlabel.com,;
Raylene,Kampa,2 Sw Nyberg Rd,Elkhart,Elkhart,IN,12/19/2001,46514,574-499-1454,574-330-1884,rkampa#kampa.org,http://www.hermarinc.com,;
Flo,Bookamer,89992 E 15th St,Alliance,Box Butte,NE,12/19/1957,69301,308-726-2182,308-250-6987,flo.bookamer#cox.net,http://www.simontonhoweschneiderpc.com,;
Jani,Biddy,61556 W 20th Ave,Seattle,King,WA,8/7/1966,98104,206-711-6498,206-395-6284,jbiddy#yahoo.com,http://www.warehouseofficepaperprod.com,;
Chauncey,Motley,63 E Aurora Dr,Orlando,Orange,FL,3/1/2000,32804,407-413-4842,407-557-8857,chauncey_motley#aol.com,http://www.affiliatedwithtravelodge.com
"""
a = empRecords.strip().split(";")
And I have the following code for searching:
import empData as x

def seecity():
    empCitylist = list()
    for ct in x.a:
        empCt = ct.strip().split(",")
        empCitylist.append(empCt)
    t = sorted(empCitylist, key=lambda x: x[3])
    for c in t:
        city = (c[3])
        print(city)
    live_city = input("Enter city: ")
    for cy in city:
        if live_city in cy:
            print(c[1])
            # print("Name: "+ c[1] + ",", c[0], "| Current City: " + c[3])
Forgive my idiotic approach, as I am new to Python. What I am trying to do: the user inputs a city, then the results should display the last name and first name of the employees living in that city (I dunno if I made sense, lol).
By the way, the code I used above doesn't return any answers. It just loops back to the input.
Thank you for helping. Lovelots. <3
PS: the format of the empData is: first name, last name, address, city, country, birthday, zip, phone, and email
You can use the csv module to easily read a file with comma-separated values:
import csv

with open('test.csv', newline='') as csvfile:
    records = list(csv.reader(csvfile))

def search(data, elem, index):
    out = list()
    for row in data:
        if row[index] == elem:
            out.append(row)
    return out

# test
print(search(records, 'Orlando', 3))
Based on your original code, you can do it like this:
# Make a list of list records, sorted by city
t = sorted((ct.strip().split(",") for ct in x.a), key=lambda x: x[3])

# List cities
print("Cities in DB:")
for c in t:
    city = (c[3])
    print("-", city)

# Define the search function
def seecity():
    live_city = input("Enter city: ")
    for c in t:
        if live_city == c[3]:
            print("Name: " + c[1] + ",", c[0], "| Current City: " + c[3])

seecity()
Then, after you understand what's going on, do as @Hoxha Alban suggested, and use the csv module.
The beauty of python lies in list comprehension.
empRecords="""Jovita,Oles,8 S Haven St,Daytona Beach,Volusia,FL,6/14/1965,32114,386-248-4118,386-208-6976,joles#gmail.com,http://www.paganophilipgesq.com,;
Alesia,Hixenbaugh,9 Front St,Washington,District of Columbia,DC,3/3/2000,20001,202-646-7516,202-276-6826,alesia_hixenbaugh#hixenbaugh.org,http://www.kwikprint.com,;
Lai,Harabedian,1933 Packer Ave #2,Novato,Marin,CA,1/5/2000,94945,415-423-3294,415-926-6089,lai#gmail.com,http://www.buergimaddenscale.com,;
Brittni,Gillaspie,67 Rv Cent,Boise,Ada,ID,11/28/1974,83709,208-709-1235,208-206-9848,bgillaspie#gillaspie.com,http://www.innerlabel.com,;
Raylene,Kampa,2 Sw Nyberg Rd,Elkhart,Elkhart,IN,12/19/2001,46514,574-499-1454,574-330-1884,rkampa#kampa.org,http://www.hermarinc.com,;
Flo,Bookamer,89992 E 15th St,Alliance,Box Butte,NE,12/19/1957,69301,308-726-2182,308-250-6987,flo.bookamer#cox.net,http://www.simontonhoweschneiderpc.com,;
Jani,Biddy,61556 W 20th Ave,Seattle,King,WA,8/7/1966,98104,206-711-6498,206-395-6284,jbiddy#yahoo.com,http://www.warehouseofficepaperprod.com,;
Chauncey,Motley,63 E Aurora Dr,Orlando,Orange,FL,3/1/2000,32804,407-413-4842,407-557-8857,chauncey_motley#aol.com,http://www.affiliatedwithtravelodge.com
"""
rows = empRecords.strip().split(";")
data = [ r.strip().split(",") for r in rows ]
then you can use any condition to filter the list, like
print ( [ "Name: " + emp[1] + "," + emp[0] + "| Current City: " + emp[3] for emp in data if emp[3] == "Washington" ] )
['Name: Hixenbaugh,Alesia| Current City: Washington']

Error when creating dictionaries from text files

I've been working on a function which will update two dictionaries (similar authors, and awards they've won) from an open text file. The text file looks something like this:
Brabudy, Ray
Hugo Award
Nebula Award
Saturn Award
Ellison, Harlan
Heinlein, Robert
Asimov, Isaac
Clarke, Arthur
Ellison, Harlan
Nebula Award
Hugo Award
Locus Award
Stephenson, Neil
Vonnegut, Kurt
Morgan, Richard
Adams, Douglas
And so on. The first line of each block is an author's name (last name first, first name last), followed by awards they may have won, and then authors who are similar to them. This is what I've got so far:
def load_author_dicts(text_file, similar_authors, awards_authors):
    name_of_author = True
    awards = False
    similar = False
    for line in text_file:
        if name_of_author:
            author = line.split(', ')
            nameA = author[1].strip() + ' ' + author[0].strip()
            name_of_author = False
            awards = True
            continue
        if awards:
            if ',' in line:
                awards = False
                similar = True
            else:
                if nameA in awards_authors:
                    listawards = awards_authors[nameA]
                    listawards.append(line.strip())
                else:
                    listawards = []
                    listawards.append(line.strip())
                    awards_authors[nameA] = listawards
        if similar:
            if line == '\n':
                similar = False
                name_of_author = True
            else:
                sim_author = line.split(', ')
                nameS = sim_author[1].strip() + ' ' + sim_author[0].strip()
                if nameA in similar_authors:
                    similar_list = similar_authors[nameA]
                    similar_list.append(nameS)
                else:
                    similar_list = []
                    similar_list.append(nameS)
                    similar_authors[nameA] = similar_list
            continue
This works great! However, if the text file contains an entry with just a name (i.e. no awards, and no similar authors), it screws the whole thing up, generating an IndexError: list index out of range at the line nameS = sim_author[1].strip() + ' ' + sim_author[0].strip().
How can I fix this? Maybe with a try/except in that area?
Also, I wouldn't mind getting rid of those continue statements; I wasn't sure how else to keep it going. I'm still pretty new to this, so any help would be much appreciated! I keep trying stuff and it changes another section I didn't want changed, so I figured I'd ask the experts.
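For what it's worth, a try/except at that spot could look like this (a minimal sketch of just the guard, not the whole function): catch the IndexError and treat a comma-less line as a bare name.

sim_author = line.split(', ')
try:
    nameS = sim_author[1].strip() + ' ' + sim_author[0].strip()
except IndexError:
    # The line had no comma (e.g. an entry with only a single token),
    # so fall back to the stripped line itself.
    nameS = sim_author[0].strip()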
How about doing it this way: just get the data in first, then manipulate the dictionary any way you want.
test.txt contains your data:
Brabudy, Ray
Hugo Award
Nebula Award
Saturn Award
Ellison, Harlan
Heinlein, Robert
Asimov, Isaac
Clarke, Arthur
Ellison, Harlan
Nebula Award
Hugo Award
Locus Award
Stephenson, Neil
Vonnegut, Kurt
Morgan, Richard
Adams, Douglas
And my code to parse it.
award_parse.py
data = {}
name = ""
awards = []
f = open("test.txt")
for l in f:
    # make sure the line is not blank; don't process blank lines
    if not l.strip() == "":
        # if this is a name and we're not already working on an author, then set the author;
        # otherwise treat this as a new author and set the existing author to a key in the dictionary
        if "," in l and len(name) == 0:
            name = l.strip()
        elif "," in l and len(name) > 0:
            # check to see if the recipient is already in the list; add to the end of the
            # existing list if he/she already exists.
            if not name.strip() in data:
                data[name] = awards
            else:
                data[name].extend(awards)
            name = l.strip()
            awards = []
        # process any lines that are not blank, and do not have a ","
        else:
            awards.append(l.strip())
f.close()
# flush the final author, who would otherwise never be added to the dictionary
if name:
    data.setdefault(name, []).extend(awards)

for k, v in data.items():
    print("%s got the following awards: %s" % (k, v))

Add edge between nodes if list contains part of another list

I'm trying to add edges between nodes.
I have a text file which I have put into a list.
The first list contains this:
Title, Rating
[('"$weepstake$" (1979) {(#1.2)}', '10.0'),
('"\'Til Death Do Us Part" (2006) {Pilot(#1.0)}', '3.7'),
('"\'Conversations with My Wife\'" (2010)', '4.2'),
('"\'Da Kink in My Hair" (2007)', '4.2').....much more here ]
I want to create nodes labeled with all the titles, and when two titles have the same rating I want to create an edge between them, so that in the end I get all titles with rating 10.0 together in one network, and so on.
My code so far:
import networkx as nx
import string
from sys import maxint
import csv
import pprint
import re

def printStuff(labels, dG):
    for index, node in enumerate(dG.nodes()):
        print '%s:%d\n' % (labels[index], dG.node[node]['count'])

str1 = titleList
#print str1
get_user_info = titleList1
dG = nx.DiGraph()
for i, word in enumerate(str1):
    try:
        next_word = str1[i]
        if not dG.has_node(word):
            dG.add_node(word)
            dG.node[word]['count'] = 1
        else:
            dG.node[word]['count'] += 1
        if not dG.has_node(next_word):
            dG.add_node(next_word)
            dG.node[next_word]['count'] = 0
        if not dG.has_edge(word, next_word):
            dG.add_edge(word, next_word, weight=0)
        else:
            dG.edge[word][next_word]['weight'] += 1
    except IndexError:
        if not dG.has_node(word):
            dG.add_node(word)
            dG.node[word]['count'] = 1
        else:
            dG.node[word]['count'] += 1
    except:
        raise

printStuff(titleList, dG)
printStuff(titleList, dG)
Output:
10.0:1
10.0:1
3.7:1
10.0:1
3.7:1
4.2:1
10.0:1
3.7:1
4.2:1
4.2:1
And for edges:
for edge in dG.edges():
    print '%s:%d\n' % (edge, dG.edge[edge[0]][edge[1]]['weight'])
Output:
(('"\'Conversations with My Wife\'" (2010)', '4.2'), ('"\'Conversations with My Wife\'" (2010)', '4.2')):0
(('"\'Da Kink in My Hair" (2007)', '4.2'), ('"\'Da Kink in My Hair" (2007)', '4.2')):0
(('"$weepstake$" (1979) {(#1.2)}', '10.0'), ('"$weepstake$" (1979) {(#1.2)}', '10.0')):0
(('"\'Til Death Do Us Part" (2006) {Pilot (#1.0)}', '3.7'), ('"\'Til Death Do Us Part" (2006) {Pilot (#1.0)}', '3.7')):0
How about this:
data = [('"$weepstake$" (1979) {(#1.2)}', '10.0'),
('"\'Til Death Do Us Part" (2006) {Pilot(#1.0)}', '3.7'),
('"\'Conversations with My Wife\'" (2010)', '4.2'),
('"\'Da Kink in My Hair" (2007)', '4.2')]
import networkx as nx
G = nx.Graph()
G.add_edges_from(data)
nx.draw(G)
If you want a count of edges for a score:
len(G.edges('4.2'))
2
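This works because add_edges_from connects each title node to its rating node, so two titles with the same rating end up joined through that shared rating node. A small usage sketch for listing the titles of a given rating:

# Titles sharing rating '4.2' are exactly the neighbors of the '4.2' node.
print(list(G.neighbors('4.2')))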
