Trying to save my dict code to a .csv file - python

I am trying to save this dict into a .csv file, with one column for the key and one column for the counted frequency. But when I try to run the code below, I get an error at the end, pointing at line 6. What am I doing wrong?
encryptedA_edited = abcfreq(encryptedA)
import csv

with open("EncryptA_Edited.csv", "w", newline="") as ECA:
    writer = csv.writer(ECA)
    writer.writerows(encryptedA_edited.items())
'NoneType' object has no attribute 'items'
This is my function:
import string

encryptedA = open("encryptedA.txt")
encryptedA = encryptedA.read()
encryptedB = open("encryptedB.txt")
encryptedB = encryptedB.read()

def abcfreq(lettervalues):
    lettervalues = lettervalues.lower().strip()
    freq = {}
    j = 0
    for x in string.ascii_lowercase:
        freq[x] = 0
    for j in lettervalues:
        if j in freq:
            freq[j] += 1
    for key, value in freq.items():
        print("%s:%d" % (key, value))
    max_value = max(freq.values())
    max_key = max(freq, key=freq.get)
    print("The letter with the highest frequency is " + max_key + ":" + str(max_value))

This should be what you're looking for.
I added a few functions for your convenience, and the example shows how to view a file once it has been created.
from random import choice, randint
import csv, os, string

def freq(element, iterable) -> int:
    return sum(1 for i in iterable if i == element)

def sample(sequence, length):
    '''
    Just for "generating" test strings
    breaks if the sequence does not have a __len__ attribute
    '''
    for i in range(length):
        yield choice(sequence)

def ascii(omissions: str = '', include: bool = False) -> str:
    """
    A convenient ascii character set
    Return an ascii character set excluding the given omissions:
        "p" -> ' ' + punctuation
        "u" -> uppercase
        "l" -> lowercase
        "d" -> digits
    Feel free to omit combinations:
        >>> ascii('lup')
        ... 0123456789
    or include them
        >>> ascii('d', True)
        ... 0123456789
    """
    d = {
        "p": " " + string.punctuation,
        "u": string.ascii_uppercase,
        "l": string.ascii_lowercase,
        "d": string.digits,
    }
    return "".join(d[k] for k in d if k in omissions) if include else "".join(d[k] for k in d if k not in omissions)

def weights(string, omissions='', include=False):
    # return {i: freq(i, string) for i in set(string)}  # if you only want to measure elements of the string
    return {i: freq(i, string) for i in ascii(omissions, include)}

heaviest = lambda string: max(string, key=weights(string).get)

if __name__ == '__main__':
    s = 'abcda'
    print(s, heaviest(s), sep='\n\t')  # 'a'
    strings = [''.join(sample(ascii('l', True)[:4], randint(3, 5))) for i in range(4)]
    for s in strings:
        path = s + '.csv'
        with open(path, "w", newline="") as ECA:
            writer = csv.writer(ECA)
            writer.writerows(weights(s, 'l', True).items())
        os.startfile(path)
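As for the original error: 'NoneType' object has no attribute 'items' almost certainly means that abcfreq prints the frequencies but never returns the dictionary, so encryptedA_edited ends up as None. A minimal sketch of that fix (a condensed version of your function; the key point is the final return):

def abcfreq(lettervalues):
    # requires the `import string` you already have at the top of the script
    freq = dict.fromkeys(string.ascii_lowercase, 0)
    for ch in lettervalues.lower().strip():
        if ch in freq:
            freq[ch] += 1
    return freq  # without a return, the caller receives None and .items() fails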

Related

near duplicate detection python sql

I have a huge data set containing shipper/supplier names from different sources, and it has near-duplicate values in it.
I tried many different techniques available on the internet, but none of them was quite satisfying, or they were too slow for data this size.
I found the OpenRefine GitHub repo for fingerprinting algorithms, added some more code, and it solved my purpose.
Have a look.
My dataset looks something like this...
import re, string
import pandas as pd
from unidecode import unidecode
from collections import defaultdict

# clean the text before processing
def cleansing_special_characters(txt):
    seps = [' ',';',':','.','`','~',',','*','#','#','|','\\','-','_','?','%','!','^','(',')','[',']','{','}','$','=','+','"','<','>',"'",' AND ', ' and ']
    default_sep = seps[0]
    txt = str(txt)
    for sep in seps[1:]:
        if sep == " AND " or sep == " and ":
            txt = txt.upper()
            txt = txt.replace(sep, ' & ')
        else:
            txt = txt.upper()
            txt = txt.replace(sep, default_sep)
    # if every token is numeric, collapse the whole string to 'NUMBERS'
    try:
        list(map(int, txt.split()))
        txt = 'NUMBERS'
    except:
        pass
    txt = re.sub(' +', ' ', txt)
    temp_list = [i.strip() for i in txt.split(default_sep)]
    temp_list = [i for i in temp_list if i]
    return " ".join(temp_list)

punctuation = re.compile('[%s]' % re.escape(string.punctuation))

class fingerprinter(object):
    # __init__ function
    def __init__(self, string):
        self.string = self._preprocess(string)

    # strip leading/trailing spaces, remove punctuation and lower-case
    def _preprocess(self, string):
        return punctuation.sub('', string.strip().lower())

    def _latinize(self, string):
        return unidecode(string)
        # return unidecode(string.decode('utf-8'))

    def _unique_preserve_order(self, seq):
        seen = set()
        seen_add = seen.add
        return [x for x in seq if not (x in seen or seen_add(x))]

    # ####################################################
    def get_fingerprint(self):
        return self._latinize(' '.join(self._unique_preserve_order(sorted(self.string.split()))))

    def get_ngram_fingerprint(self, n=1):
        return self._latinize(''.join(self._unique_preserve_order(sorted([self.string[i:i + n] for i in range(len(self.string) - n + 1)]))))

# read excel file
df = pd.read_excel('Input_File.xlsx')

# step 1: cleaning - preprocess the column
df['Clean'] = df['SUPPLIER_NAME'].apply(cleansing_special_characters)

# n_gram fingerprint algorithm
###########################################################################################
df['n_gram_fingerprint_n2'] = df['Clean'].apply(lambda x: fingerprinter(x.replace(" ", "")).get_ngram_fingerprint(n=2))

# generate tag_id for every unique generated n_gram_fingerprint
d = defaultdict(lambda: len(d))
df['tag_idn'] = [d[x] for x in df['n_gram_fingerprint_n2']]
###########################################################################################

# drop n_gram column
df.drop(columns=['n_gram_fingerprint_n2'], inplace=True)

# make a copy to create groups of tag_id
df1 = df[['SUPPLIER_NAME', 'tag_idn']]

# drop SUPPLIER_NAME column, we have tag_id's now
df.drop(columns=['SUPPLIER_NAME'], inplace=True)

# group df by tag_idn, keeping the row with the longest cleaned name per group
# group = df.groupby('tag_id').min().reset_index()
group = df.loc[df["Clean"].str.len().groupby(df["tag_idn"]).idxmax()]

# join both data frames: group (unique) and the main data
df_merge = pd.merge(df1, group, on=['tag_idn'])

# output excel file
df_merge.to_excel('Output_File.xlsx', index=False)
This is what the output data in the Excel file looks like.
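To make the fingerprinting concrete, here is a small sketch (with made-up supplier names) of what the two methods of the fingerprinter class above produce; names that collapse to the same fingerprint end up sharing a tag_idn:

# assumes the fingerprinter class defined above is in scope
for name in ("Acme Corp.", "ACME  corp", "Acme Corporation"):
    word_fp = fingerprinter(name).get_fingerprint()
    ngram_fp = fingerprinter(name.replace(" ", "")).get_ngram_fingerprint(n=2)
    print(name, "->", word_fp, "|", ngram_fp)

# "Acme Corp." and "ACME  corp" normalise to identical fingerprints
# ("acme corp" / "accmcoecmeorrp"), while "Acme Corporation" does not.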

Many emoji characters are not read by python file read

I have a dictionary of about 1,500 emoji characters in a JSON file, and I wanted to import them into my Python code. I did a file read and converted it to a Python dictionary, but now I have only 143 records. How can I import all the emoji into my code? This is my code.
import sys
import ast

file = open('emojidescription.json', 'r').read()
non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
emoji_dictionary = ast.literal_eval(file.translate(non_bmp_map))

# word = word.replaceAll(",", " ");
keys = list(emoji_dictionary["emojis"][0].keys())
values = list(emoji_dictionary["emojis"][0].values())

file_write = open('output.txt', 'a')
print(len(keys))
for i in range(len(keys)):
    try:
        content = 'word = word.replace("{0}", "{1}")'.format(keys[i], values[i][0])
    except Exception as e:
        content = 'word = word.replace("{0}", "{1}")'.format(keys[i], '')
    # file.write()
    # print(keys[i], values[i])
    print(content)
file_write.close()
This is my input sample
{
  "emojis": [
    {
      "👨‍🎓": ["Graduate"],
      "©": ["Copy right"],
      "®": ["Registered"],
      "👨‍👩‍👧": ["family"],
      "👩‍❤️‍💋‍👩": ["love"],
      "™": ["trademark"],
      "👨‍❤‍👨": ["love"],
      "⌚": ["time"],
      "⌛": ["wait"],
      "⭐": ["star"],
      "🐘": ["Elephant"],
      "🐕": ["Cat"],
      "🐜": ["ant"],
      "🐔": ["cock"],
      "🐓": ["cock"],
This is my result; the 143 denotes the number of emoji.
143
word = word.replace("�‍�‍�‍�", "family")
word = word.replace("Ⓜ", "")
word = word.replace("♥", "")
word = word.replace("♠", "")
word = word.replace("⌛", "wait")
I'm not sure why you're seeing only 143 records from an input of 1500 (your sample doesn't seem to display this behavior).
The setup doesn't seem to do anything useful, but what you're doing boils down to (simplified and skipping lots of details):
d = ..read json as python dict.
keys = d.keys()
values = d.values()
for i in range(len(keys)):
    key = keys[i]
    value = values[i]
and that should be completely correct. There are better ways to do this in Python, however, like using the zip function:
d = ..read json as python dict.
keys = d.keys()
values = d.values()
for key, value in zip(keys, values):  # zip picks pair-wise elements
    ...
or simply asking the dict for its items:
for key, value in d.items():
    ...
The json module makes reading and writing json much simpler (and safer), and using the idiom from above the problem reduces to this:
import json

emojis = json.load(open('emoji.json', 'rb'))
with open('output.py', 'wb') as fp:
    for k, v in emojis['emojis'][0].items():
        val = u'word = word.replace("{0}", "{1}")\n'.format(k, v[0] if v else "")
        fp.write(val.encode('u8'))
Why do you replace all emojis with 0xfffd in the lines:
non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
emoji_dictionary = ast.literal_eval(file.translate(non_bmp_map))
Just don't do this!
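That translation table maps every code point above the Basic Multilingual Plane (0x10000 and up) to U+FFFD, which is exactly why the multi-codepoint emoji in your output come out as runs of �. A quick illustrative sketch:

import sys

non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
print("👨‍🎓 ⌛".translate(non_bmp_map))
# -> '�‍� ⌛'  (the astral-plane code points are lost; the BMP hourglass survives)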
Using json:
import json

with open('emojidescription.json', encoding="utf8") as emojis:
    emojis = json.load(emojis)

with open('output.txt', 'a', encoding="utf8") as output:
    for emoji, text in emojis["emojis"][0].items():
        text = "" if not text else text[0]
        output.write('word = word.replace("{0}", "{1}")\n'.format(emoji, text))

How to write an INI file with ConfigParser with duplicate options

I want to write an INI file with duplicate options, i.e.:
[test]
foo = value1
foo = value2
xxx = yyy
With ConfigParser.set only the last value is written.
config = ConfigParser.ConfigParser()
config.read('example.cfg')
config.add_section('test')
config.set('test', service['foo'], service['value1'])
config.set('test', service['foo'], service['value2'])
config.set('test', service['xxx'], service['yyy'])
The result is:
[test]
foo = value2
xxx = yyy
Is there any way?
It looks like it isn't possible in a simple way. The default way ConfigParser stores options is with dicts, i.e. one value per unique key.
In a similar question, Python's ConfigParser unique keys per section, the suggestions are to go with:
ConfigObj
Patched version of epydoc
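If all you need is to produce such a file (and not to read it back through ConfigParser), one workaround is to skip ConfigParser on the write side and emit the section by hand. A minimal sketch, assuming the values from the question:

options = [("foo", "value1"), ("foo", "value2"), ("xxx", "yyy")]

with open("example.cfg", "w") as f:
    f.write("[test]\n")
    for key, value in options:  # duplicate keys are written as-is
        f.write("%s = %s\n" % (key, value))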
I have a simple custom .ini parser in Python (built for another project), which uses a list to store values, but only if they are not in key=value format. If they are in key=value format, the last key wins, since these are stored in a dictionary.
The parser can also parse nested sections like:
[SECTION1][SECTION2]
key1=value1
; etc..
The code is below. It is easy to modify to store key/value pairs in a list instead of a dictionary, or even to detect multiple keys and rename them to avoid collisions (e.g. key, key$1 for a second key with the same name, and so on). Use/modify as needed.
##
#
# Simple .ini Parser for Python 2.x, 3.x
#
##
import re

class Ini_Parser():
    """Simple .ini parser for Python"""

    NL = None
    ACTUAL = {
        '\\n': "\n",
        '\\t': "\t",
        '\\v': "\v",
        '\\f': "\f"
    }

    def parseStr(s, q):
        _self = Ini_Parser
        endq = s.find(q, 1)
        quoted = s[1:endq]
        rem = s[endq+1:].strip()
        for c, actual in _self.ACTUAL.items():
            quoted = (actual).join(quoted.split(c))
        quoted = ('\\').join(quoted.split('\\\\'))
        return quoted, rem

    def fromString(s, keysList=True, rootSection='_'):
        _self = Ini_Parser
        comments = [';', '#']
        if rootSection: rootSection = str(rootSection)
        else: rootSection = '_'
        if not _self.NL:
            _self.NL = re.compile(r'\n\r|\r\n|\r|\n')
        sections = {}
        currentSection = str(rootSection)
        if keysList:
            sections[currentSection] = { '__list__' : [] }
        else:
            sections[currentSection] = { }
        currentRoot = sections
        # parse the lines
        lines = re.split(_self.NL, str(s))
        # parse it line-by-line
        for line in lines:
            # strip the line of extra spaces
            line = line.strip()
            lenline = len(line)
            # comment or empty line, skip it
            if not lenline or (line[0] in comments): continue
            linestartswith = line[0]
            # section line
            if '['==linestartswith:
                SECTION = True
                # parse any sub-sections
                while '['==linestartswith:
                    if SECTION:
                        currentRoot = sections
                    else:
                        currentRoot = currentRoot[currentSection]
                    SECTION = False
                    endsection = line.find(']', 1)
                    currentSection = line[1:endsection]
                    if currentSection not in currentRoot:
                        if keysList:
                            currentRoot[currentSection] = { '__list__' : [] }
                        else:
                            currentRoot[currentSection] = { }
                    # has sub-section ??
                    line = line[endsection+1:].strip()
                    if not len(line): break
                    linestartswith = line[0]
            # key-value pairs
            else:
                # quoted string
                if '"'==linestartswith or "'"==linestartswith:
                    key, line = _self.parseStr(line, linestartswith)
                    # key-value pair
                    if line.find('=', 0)>-1:
                        line = line.split('=')
                        line.pop(0)
                        value = "=".join(line).strip()
                        valuestartswith = value[0]
                        # quoted value
                        if '"'==valuestartswith or "'"==valuestartswith:
                            value, rem = _self.parseStr(value, valuestartswith)
                        currentRoot[currentSection][key] = value
                    # single value
                    else:
                        if keysList:
                            currentRoot[currentSection]['__list__'].append(key)
                        else:
                            currentRoot[currentSection][key] = True
                # un-quoted string
                else:
                    line = line.split('=')
                    key = line.pop(0).strip()
                    # single value
                    if 1>len(line):
                        if keysList:
                            currentRoot[currentSection]['__list__'].append(key)
                        else:
                            currentRoot[currentSection][key] = True
                    # key-value pair
                    else:
                        value = "=".join(line).strip()
                        valuestartswith = value[0]
                        # quoted value
                        if '"'==valuestartswith or "'"==valuestartswith:
                            value, rem = _self.parseStr(value, valuestartswith)
                        currentRoot[currentSection][key] = value
        return sections

    def fromFile(filename, keysList=True, rootSection='_'):
        s = ''
        with open(filename, 'r') as f: s = f.read()
        return Ini_Parser.fromString(s, keysList, rootSection)

    def walk(o, key=None, top='', q='', EOL="\n"):
        s = ''
        if len(o):
            o = dict(o)
            if key: keys = [key]
            else: keys = o.keys()
            for section in keys:
                keyvals = o[section]
                if not len(keyvals): continue
                s += str(top) + "[" + str(section) + "]" + EOL
                if ('__list__' in keyvals) and len(keyvals['__list__']):
                    # only values as a list
                    s += q + (q+EOL+q).join(keyvals['__list__']) + q + EOL
                    del keyvals['__list__']
                if len(keyvals):
                    for k, v in keyvals.items():
                        if not len(v): continue
                        if isinstance(v, dict) or isinstance(v, list):
                            # sub-section
                            s += Ini_Parser.walk(keyvals, k, top + "[" + str(section) + "]", q, EOL)
                        else:
                            # key-value pair
                            s += q+k+q + '=' + q+v+q + EOL
                s += EOL
        return s

    def toString(o, rootSection='_', quote=False, EOL="\n"):
        s = ''
        if rootSection: root = str(rootSection)
        else: root = '_'
        if quote: q = '"'
        else: q = ''
        # dump the root section first, if it exists
        if root in o:
            section = dict(o[root])
            llist = None
            if '__list__' in section:
                llist = section['__list__']
                if llist and isinstance(llist, list) and len(llist):
                    s += q + (q+EOL+q).join(llist) + q + EOL
                del section['__list__']
            for k, v in section.items():
                if not len(v): continue
                s += q+k+q + '=' + q+v+q + EOL
            s += EOL
            del o[root]
        # walk the sections and sub-sections, if any
        s += Ini_Parser.walk(o, None, '', q, EOL)
        return s

    def toFile(filename, o, rootSection='_', quote=False, EOL="\n"):
        with open(filename, 'w') as f:
            f.write(Ini_Parser.toString(o, rootSection, quote, EOL))

# for use with 'import *'
__all__ = [ 'Ini_Parser' ]
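A quick usage sketch of the parser above (the output shape is inferred from tracing fromString): nested sections come back as nested dicts, and bare lines that are not in key=value format go into the __list__ entry when keysList=True:

ini_text = """
; a comment
standalone_flag
[SECTION1][SECTION2]
key1=value1
"""

parsed = Ini_Parser.fromString(ini_text)
print(parsed)
# {'_': {'__list__': ['standalone_flag']},
#  'SECTION1': {'__list__': [],
#               'SECTION2': {'__list__': [], 'key1': 'value1'}}}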

search pattern in sequence and report identity

I have two FASTA files with sequences in them. I want to align the sequences in the second file against the first file and report the identity.
For example:
File1:
>s1
aaccggactggacatccg
>s2
gtcgactctcggaattg
....
File2:
>a1
actg
>a2
tccg
.....
I want to take the file2 sequences, look for them in file1, and print the match (with mismatched bases in uppercase) plus the identity, in CSV format.
Output
name,a1_alignment,a1_identity,a2_alignment,a2_identity
s1,actg,100,tccg,100
s2,aCtg,95,tcCg,95
Here is what I have done so far:
import sys
import os, csv
from Bio import SeqIO
from itertools import *
from optparse import OptionParser

parser = OptionParser()
parser.add_option("-m", "--mismatch_threshold", dest="mismatch_threshold", default=2,
                  help="This is the number of differences you'll allow between the actual read and your sequence of interest. Default is 2")
(options, args) = parser.parse_args()

if len(sys.argv) != 4:
    print "Usage : python search.py <file1> <file2> <fout>"
    sys.exit()

f1 = open(sys.argv[1], 'r')
f2 = open(sys.argv[2], 'r')
fout = open(sys.argv[3], 'w')
writer = csv.writer(fout)

def long(f1):
    for record in SeqIO.parse(f1, 'fasta'):
        header = record.name
        sequence = record.seq
        yield [header, sequence]

def short(f2):
    for record in SeqIO.parse(f2, 'fasta'):
        head = record.name
        seq = record.seq
        return seq

def alignment(sequence, seq, mismatch_threshold):
    l1 = len(sequence)
    l2 = len(seq)
    alignment = []
    for i in range(0, min(l1, l2)):
        if sequence[i] == seq[i]:
            alignment.append(i)
        else:
            mismatch = sum(c1 != c2 for c1, c2 in zip(sequence, seq))
            if mismatch <= mismatch_threshold:
                alignment.append(i)
    k = 0
    l = 0
    for read in alignment:
        for letter in read:
            if letter == isupper():
                pass
            else:
                if letter == alignment[0].seq[j]:
                    l += 1
            k += 1
        k = 0
        length = seq
        percent = 100 * l / len(seq)
        # print percent
        yield percent

longsequences = long(open(sys.argv[1], 'r'))
shortsequences = short(open(sys.argv[2], 'r'))
align = alignment(longsequences, shortsequences, options.mismatch_threshold)

for name in head:
    writer.writerow((name + '_alignment', name + '_identity'))
for s in align:
    # print to csv file
I need help looking up the file2 sequences in file1 with mismatches, printing the alignment, and also calculating the identity percentage.
Error:
File "s.py", line 34, in alignment
l1 = len(sequence)
TypeError: object of type 'generator' has no len()

Cutting character values according to value from file

This is what I am doing:
import csv

output = open('output.txt', 'wb')

# this function returns the min for num.txt
def get_min(num):
    return int(open('%s.txt' % num, 'r+').readlines()[0])

# temporary variables
last_line = ''
input_list = []

# iterate over input.txt and sort the input into a list of tuples
for i, line in enumerate(open('input.txt', 'r+').readlines()):
    if i % 2 == 0:
        last_line = line
    else:
        input_list.append((last_line, line))

filtered = [(header, data[:get_min(header[-2])] + '\n') for (header, data) in input_list]
[output.write(''.join(data)) for data in filtered]
output.close()
In this code input.txt is something like this
>012|013|0|3|M
AFDSFASDFASDFA
>005|5|67|0|6
ACCTCTGACC
>029|032|4|5|S
GGCAGGGAGCAGGCCTGTA
and num.txt is something like this
M 4
P 10
I want to take the above input.txt, look at the last column of each header line, find the matching letter in num.txt, and cut that record's characters according to the value given there.
I think the error in my code is that it only accepts an integer text file, whereas it should also accept a file which contains letters.
The totally revised version, after a long chat with the OP;
import os
import re

# Fetch all hashes and counts
file_c = open('num.txt')
file_c = file_c.read()
lines = re.findall(r'\w+\.txt \d+', file_c)

numbers = {}
for line in lines:
    line_split = line.split('.txt ')
    hash_name = line_split[0]
    count = line_split[1]
    numbers[hash_name] = count
# print(numbers)

# The input file
file_i = open('input.txt')
file_i = file_i.read()

for hash_name, count in numbers.iteritems():
    regex = '(' + hash_name.strip() + ')'
    result = re.findall(r'>.*\|(' + regex + ')(.*?)>', file_i, re.S)
    if len(result) > 0:
        data_original = result[0][2]
        stripped_data = result[0][2][int(count):]
        file_i = file_i.replace(data_original, '\n' + stripped_data)
        # print(data_original)
        # print(stripped_data)
# print(file_i)

# Write the input file to new input_new.txt
f = open('input_new.txt', 'wt')
f.write(file_i)
You can do it like so;
import re

min_count = 4  # this variable will contain that count integer from where to start removing
str_to_match = 'EOG6CC67M'  # this variable will contain the filename you read
input = ''  # The file input (input.txt) will go in here
counter = 0

def callback_f(e):
    global min_count
    global counter
    counter += 1
    # Check your input
    print(str(counter) + ' >>> ' + e.group())
    # Only replace the value with nothing (remove it) after a certain count
    if counter > min_count:
        return ''  # replace with nothing
    return e.group()  # otherwise keep the match unchanged (re.sub needs a string back)

result = re.sub(r'' + str_to_match, callback_f, input)
With this tactic you can keep count with a global counter and there's no need to do hard line-loops with complex structures.
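To illustrate the tactic in isolation, here is a tiny self-contained sketch (with made-up values) of re.sub driving a callback that keeps the first few occurrences and removes the rest:

import re

counter = 0
min_count = 2  # keep the first 2 occurrences, drop the rest (example value)

def keep_first_n(match):
    global counter
    counter += 1
    return match.group() if counter <= min_count else ''

print(re.sub(r'abc', keep_first_n, 'abc-abc-abc-abc'))
# -> 'abc-abc--'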
Update
More detailed version with file access;
import os
import re

def callback_f(e):
    global counter
    counter += 1
    # Check your input
    print(str(counter) + ' >>> ' + e.group())

# Fetch all hash-file names and their content (count)
num_files = os.listdir('./num_files')
numbers = {}
for file in num_files:
    if file[0] != '.':
        file_c = open('./num_files/' + file)
        file_c = file_c.read()
        numbers[file.split('.')[0]] = file_c

# Now the CSV files
csv_files = os.listdir('./csv_files')
for file in csv_files:
    if file[0] != '.':
        for hash_name, min_count in numbers.iteritems():
            file_c = open('./csv_files/' + file)
            file_c = file_c.read()
            counter = 0
            result = re.sub(r'' + hash_name, callback_f, file_c)
            # Write the replaced content back to the file here
Considered directory/file structure;
+ Projects
  + Project_folder
    + csv_files
      - input1.csv
      - input2.csv
      ~ etc.
    + num_files
      - EOG6CC67M.txt
      - EOG62JQZP.txt
      ~ etc.
    - python_file.py
The CSV files contain the big chunks of text you state in your original question.
The Num files contain the hash-files with an Integer in them
What happens in this script;
Collect all hash files (in a dictionary) and each one's inner count number
Loop through all CSV files
Sub-loop through the collected numbers for each CSV file
Replace/remove (based on what you do in callback_f()) hashes after a certain count
Write the output back (it's the last comment in the script, and would contain the file.write() functionality)
