import random

#get filename
name = input('Enter filename: ')

#load file
try:
    input_file = open(name, 'r')
except IOError:
    print('File does not exist. Program will terminate.')

#make key value
line = input_file.readline()
key = []
key_mix = []
for i in range(len(line)):
    if line[i] not in key:
        key.append(line[i])
for i in range(len(line)):
    if line[i] not in key_mix:
        key_mix.append(line[i])
random.shuffle(key_mix)

#encryption
if name.split('.')[1] == 'txt':
    try:
        key_file = open(name.split('.')[0] + '.key', 'w')
        enc_file = open(name.split('.')[0] + '.enc', 'w')
    except IOError:
        print('File does not exist. Program will terminate.')
    key_write = ['']
    for g in range(len(key)):
        key_write[0] += key_mix[g]
    for i in range(len(key)):
        keys = str(key[i]) + ',' + str(key_mix[i])
        key_file.write(keys + '\n')
    couple = {}
    for k in range(len(key)):
        couple[key[k]] = key_mix[k]
    enc = ['']
    for t in range(len(line)):
        enc[0] += couple.get(line[t])
    enc_file.write(enc[0])
    input_file.close()
    key_file.close()
    enc_file.close()

#decryption
elif name.split('.')[1] == 'enc':
    try:
        key_file = open(name.split('.')[0] + '.key', 'r')
        dec_file = open(name.split('.')[0] + '.txt', 'w')
    except IOError:
        print('File does not exist. Program will terminate.')
    line = input_file.readline()
    dec = ['']
    sol = {}
    while True:
        sen = key_file.readline()
        if not sen: break
        sol.update({sen[2]: sen[0]})  # <-- problem here
    for m in range(len(line)):
        dec[0] += sol.get(line[m])
    dec_file.write(dec[0])
    input_file.close()
    key_file.close()
    dec_file.close()
It raises this error:
IndexError: string index out of range
When I check my .key file, it looks like this:
t,o
h,l
e,s
r,h
i,t
s,r
,n
n,v
o,u
u,e
f,i
l,f
v,
but when I print each readline() result, it looks like this:
t,o
(blank)
e,s
(blank)
i,t
(blank)
,n
(blank)
o,u
(blank)
f,i
(blank)
v,
(blank)
How can I fix it?
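In case it helps, the IndexError most likely comes from the newline character: readline() keeps the trailing '\n', so '\n' ends up in key and key_mix and gets written into the .key file as a bare newline. That is why some lines in that file look blank, and a .key line that is only "\n" has no index 2, so sen[2] fails on it. (The extra blank lines you see when printing are just print() adding a second newline to lines that already end with one.) Below is a minimal sketch of one way to rebuild the decryption map while skipping those bare-newline lines; load_key is a hypothetical helper, not part of the original code:

def load_key(path):
    # Hypothetical helper: parse "x,y" pairs and ignore lines that are too
    # short to index, such as the bare newlines written for the '\n' key.
    sol = {}
    with open(path) as key_file:
        for sen in key_file:
            sen = sen.rstrip('\n')
            if len(sen) == 3 and sen[1] == ',':
                sol[sen[2]] = sen[0]   # encrypted char -> original char
    return sol

Stripping the newline from the plaintext before building the key (for example line = input_file.readline().rstrip('\n')) would avoid writing those blank entries in the first place.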
I have a file in1.txt
info="0x0000b573" data="0x7" id="sp. PCU(Si)"
info="0x0000b573" data="0x00000007" id="HI all. SHa"
info="0x00010AC3" data="0x00000003" id="abc_16. PS"
info="0x00010ac3" data="0x00000045" id="hB2_RC/BS (Spr)"
info="0x205" data="0x00000010" id="cgc_15. PK"
info="0x205" data="0x10" id="cgsd_GH/BS (Scd)"
Expected output: out.txt
info="0x00010AC3" data="0x00000003" id="abc_16. PS"
info="0x00010ac3" data="0x00000045" id="hB2_RC/BS (Spr)"
I need only the lines that have the same info value but different data values to be written to out.txt,
but my current code removes every line that contains the string data.
with open("in.txt", "r") as fin,open("out.txt", "w") as fout:
for line in fin:
if 'data' not in line:
fout.write(line.strip()+'\n')
What I need is, for example: line 1 and line 2 have the same info="0x0000b573", and their data values "0x7" and "0x00000007" are the same number, so those lines should be removed.
You can use a regex:
import re
s = '''info="0x0000b573" data="0x7" id="sp. PCU(Si)"
info="0x0000b573" data="0x00000007" id="HI all. SHa"
info="0x00010AC3" data="0x00000003" id="abc_16. PS"
info="0x00010ac3" data="0x00000045" id="hB2_RC/BS (Spr)"
info="0x205" data="0x00000010" id="cgc_15. PK"
info="0x205" data="0x10" id="cgsd_GH/BS (Scd)"'''
parsed_data = re.findall(r'info="([^"]+)" data="([^"]+)" id="[^"]+"', s, re.MULTILINE)
parsed_data = sorted([list(map(lambda x: int(x, 16), i)) + [index] for index,i in enumerate(parsed_data)])
row_numbers = [j for i in [[parsed_data[i][-1], parsed_data[i+1][-1]] for i in range(0,len(parsed_data),2) if parsed_data[i][1] != parsed_data[i+1][1]] for j in i]
final_output = []
for index, line in enumerate(s.split('\n')):
    if index in row_numbers:
        final_output.append(line)
final_out_text = '\n'.join(final_output)
print(final_out_text)
# info="0x00010AC3" data="0x00000003" id="abc_16. PS"
# info="0x00010ac3" data="0x00000045" id="hB2_RC/BS (Spr)"
You could also try something like this, I think:
#!/usr/bin/python3
records = {}
items = []
info = []
data = []
with open("in.dat", "r") as fin:
    for line in fin:
        items = line.split(' ')
        info = items[0].split('=')
        data = items[1].split('=')
        try:
            key = info[1].strip('"').lower()
            value = str(int(data[1].strip('"'), 16))
            records[key][value] += 1
        except KeyError:
            try:
                records[key][value] = 1
            except KeyError:
                records[key] = {value: 1}
out = dict()
for key in records:
    for value in records[key]:
        if records[key][value] == 1:
            try:
                out[key].append(value)
            except KeyError:
                out[key] = [value]
with open("out.dat", "w") as fout:
    for key in out:
        for value in out[key]:
            fout.write(f"{key}={value}\n")
Something like this could work:
found_info_values = []
with open("in.txt", "r") as fin, open("out.txt", "w") as fout:
    for line in fin:
        info = line.split('"')[1]
        if info not in found_info_values:
            fout.write(line.strip() + '\n')
            found_info_values.append(info)  # append the whole value, not its individual characters
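For comparison, here is a sketch that follows the stated requirement more literally: group the lines by their info value (case-insensitively, since 0x00010AC3 and 0x00010ac3 should match) and write a group out only when it contains more than one distinct numeric data value. The in.txt/out.txt names and the info="..." data="..." layout are taken from the question; the grouping approach itself is my assumption:

import re
from collections import defaultdict

line_re = re.compile(r'info="([^"]+)"\s+data="([^"]+)"')

groups = defaultdict(list)   # lowercased info -> [(numeric data, original line), ...]
with open("in.txt") as fin:
    for line in fin:
        m = line_re.search(line)
        if not m:
            continue
        info, data = m.groups()
        groups[info.lower()].append((int(data, 16), line.rstrip("\n")))

with open("out.txt", "w") as fout:
    for rows in groups.values():
        if len({value for value, _ in rows}) > 1:   # same info, different data
            for _, original in rows:
                fout.write(original + "\n")

On the sample input this keeps only the two 0x00010AC3/0x00010ac3 lines, which matches the expected out.txt.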
I'm getting this error message when using yield.
When I remove the yield results and yield timeout lines, the code works fine without the error message.
I don't know what the directory or file 'o' is, since I'm not using it anywhere in the code.
Here is my full code:
import gradio as gr
import ipaddress
import requests
from requests.auth import HTTPBasicAuth
import os
import string
from datetime import date, datetime

####SETTING UP DATE AND TIME WITH ISRAELI FORMAT###
current_date = date.today()
current_month = current_date.strftime('%B')
current_year = current_date.strftime('%Y')
date_reformat = current_date.strftime('%d/%m/%y')
current_day = current_date.strftime('%d')

###SWITCH###
def switch_ver(ip):
    with open('switches_successful_results.txt','w') as switches_successful, open('switches_failed_results.txt', 'w') as switches_failed:
        ip_addr = ip.split()
        for i in ip_addr:
            ip_addr = list(ipaddress.ip_network(i))
            try:
                basic=HTTPBasicAuth('some','password')
                login = requests.post('http://'+i+':80/rest/v7/login-sessions', auth=basic)
                cookie = login.cookies
                get_ver = requests.get('http://'+i+':80/rest/v7/system/status', cookies=cookie)
                get_ver = get_ver.json()
                get_ver = get_ver['firmware_version']
                get_ver = get_ver
                with open('switches_successful_results.txt', 'a+') as sw:
                    results = 'Switch version for {} is: {} \n'.format(i, get_ver)
                    sw.write(results)
                yield results
            except requests.exceptions.ConnectTimeout:
                timeout = 'Could not connect to switch: '+i+' REQUEST TIMED OUT\n'
                with open('switches_failed_results.txt', 'a+') as sw:
                    sw.write(timeout)
                yield timeout
    with open('switches_successful_results.txt','r') as switches_successful, open('switches_failed_results.txt', 'r') as switches_failed:
        summary = switches_failed.read() + switches_successful.read()
    return (summary), ['switches_successful_results.txt', 'switches_failed_results.txt']

###IPBlockerK###
def block_ip(ip):
    duplicate_ips = []
    blocked_ips = []
    invalid_ips = []
    with open('fortigate_ips.txt','r+') as f, open('fortigate_urls.txt', 'r+') as u:
        fortigate_ips = f.read()
        fortigate_urls = u.read()
        ip_addr = ip.split()
        for i in ip_addr:
            try:
                list(ipaddress.ip_network(i))
                if i in fortigate_ips:
                    duplicate_ips.append(i)
                elif ipaddress.ip_address(i).is_private:
                    invalid_ips.append(i)
                else:
                    blocked_ips.append(i)
                    f.write(i + '\n')
            except ValueError:
                if i in fortigate_ips or i in fortigate_urls:
                    duplicate_ips.append(i)
                elif i[0] in string.ascii_letters or i[0] == '*':
                    blocked_ips.append(i)
                    u.write(i + '\n')
                else:
                    invalid_ips.append(i)
    current_time = datetime.now()
    current_time = current_time.strftime('%H:%M:%S')
    if os.path.exists(current_year) == False:
        os.makedirs(current_year + '\\'+ current_month + '\\' + current_day)
        os.chdir(current_year+ '\\' + current_month +'\\'+ current_day)
        with open('Blocked_IPs.txt', 'a+') as Blocked_IPs:
            to_file = ('###############{}###############\n'.format(current_time)+'\n'.join(blocked_ips))+'\n'
            Blocked_IPs.write(to_file)
        os.chdir('D:\\programs\\Python310\\Projects\\net_sec')
    elif os.path.exists(current_year) == True and os.path.exists(current_year + '\\'+ current_month) == False:
        os.chdir(current_year)
        os.makedirs(current_month + '\\' + current_day)
        os.chdir(current_month +'\\'+ current_day)
        with open('Blocked_IPs.txt', 'a+') as Blocked_IPs:
            to_file = ('###############{}###############\n'.format(current_time)+'\n'.join(blocked_ips))+'\n'
            Blocked_IPs.write(to_file)
        os.chdir('D:\\programs\\Python310\\Projects\\net_sec')
    elif os.path.exists(current_year) == True and os.path.exists(current_year + '\\'+ current_month) == True and os.path.exists(current_year + '\\'+ current_month + '\\' + current_day) == False:
        os.chdir(current_year + '\\'+ current_month)
        os.mkdir(current_day)
        os.chdir(current_day)
        with open('Blocked_IPs.txt', 'a+') as Blocked_IPs:
            to_file = ('###############{}###############\n'.format(current_time)+'\n'.join(blocked_ips))+'\n'
            Blocked_IPs.write(to_file)
        os.chdir('D:\\programs\\Python310\\Projects\\net_sec')
    else:
        os.chdir(current_year + '\\' + current_month + '\\' + current_day)
        with open('Blocked_IPs.txt', 'a+') as Blocked_IPs:
            to_file = ('###############{}###############\n'.format(current_time)+'\n'.join(blocked_ips))+'\n'
            Blocked_IPs.write(to_file)
        os.chdir('D:\\programs\\Python310\\Projects\\net_sec')
    blocked_ips_result = 'Following IP\s or URLs were Blocked!: \n'+'\n'.join(blocked_ips) +'\n'
    duplicate_ips_result = 'Skipped!...Found duplicates IP\s for: \n'+'\n'.join(duplicate_ips) +'\n'
    invalid_ips_result = 'Skipped!..Invalid IP\s for \n'+'\n'.join(invalid_ips) +'\n'
    with open('fortigate_ips.txt', 'r') as f, open('fortigate_urls.txt', 'r') as u:
        current_commit_stats = len(blocked_ips)
        ips_stats = len(f.readlines())
        urls_stats = len(u.readlines())
        total_stats = ips_stats + urls_stats
    if bool(duplicate_ips) == True and bool(blocked_ips) == False:
        print(1)
        return duplicate_ips_result, current_commit_stats, ips_stats, urls_stats, total_stats
    elif bool(duplicate_ips) == True and bool(blocked_ips) == True and bool(invalid_ips) == True:
        print(2)
        return invalid_ips_result + duplicate_ips_result + blocked_ips_result, current_commit_stats, ips_stats, urls_stats, total_stats
    elif bool(invalid_ips) == True and bool(blocked_ips) == True:
        print(3)
        return invalid_ips_result + blocked_ips_result, current_commit_stats, ips_stats, urls_stats, total_stats
    elif bool(invalid_ips) == True and bool(blocked_ips) == True:
        print(4)
        return invalid_ips_result + blocked_ips_result, current_commit_stats, ips_stats, urls_stats, total_stats
    else:
        print(5)
        return (blocked_ips_result), current_commit_stats, ips_stats, urls_stats, total_stats

###GRADIO GUI###
#f = open('fortigate_ips.txt', 'r')
#fortigate = (f.read().split())
#f.close()
with gr.Blocks(title = 'Switcher') as switches_ver:
    gr.Markdown('Welcome to IPBlocker')
    with gr.Tab(label = 'IPBlocker'):
        with gr.Row():
            with gr.Column():
                ips_to_block = gr.Textbox(label = "IPs", lines = 10, placeholder=('Please fill Ips to block'))
                block_btn = gr.Button('Block')
                #ip_lookup = gr.Dropdown(fortigate)
            with gr.Column():
                output_textbox = gr.Textbox(label = "Results", lines=10)
                with gr.Row():
                    current_commit_stats = gr.Textbox(label = 'Current IP\s or URLs added to block:')
                    forti_ips_stats = gr.Textbox(label = 'Total blocked IP\s on Fortigate: ')
                    forti_urls_stats = gr.Textbox(label = 'Total URLs blocked on Fortigate')
                    forti_total_stats = gr.Textbox(label = 'Total blocked IP\s and URLs on Fortigate')
        block_btn.click(fn=block_ip, inputs = ips_to_block, outputs = [output_textbox, current_commit_stats, forti_ips_stats, forti_urls_stats, forti_total_stats])
    with gr.Tab(label = 'Switcher'):
        with gr.Row():
            with gr.Column():
                switch_box = gr.Textbox(label = 'Switches', lines = 10, placeholder='Please fill switches IPs...')
                show_ver = gr.Button('Show current switches version')
                upgrade_ver = gr.Button('Upgrade selected switches')
            with gr.Column():
                output_textbox = gr.Textbox(label='Results',lines = 10)
                output_file = gr.File(['switches_successful_results.txt', 'switches_failed_results.txt'])
        show_ver.click(fn=switch_ver, inputs = switch_box, outputs = [output_textbox, output_file])
        upgrade_ver.click(fn=block_ip, inputs = ips_to_block, outputs=[output_textbox, output_file])
switches_ver.queue(concurrency_count=20, max_size=20).launch()
full error traceback:
Traceback (most recent call last):
File "D:\programs\Python310\lib\site-packages\gradio\routes.py", line 273, in run_predict
output = await app.blocks.process_api(
File "D:\programs\Python310\lib\site-packages\gradio\blocks.py", line 757, in process_api
predictions = self.postprocess_data(fn_index, result["prediction"], state)
File "D:\programs\Python310\lib\site-packages\gradio\blocks.py", line 721, in postprocess_data
block.postprocess(prediction_value)
File "D:\programs\Python310\lib\site-packages\gradio\components.py", line 2147, in postprocess
"name": processing_utils.create_tmp_copy_of_file(
File "D:\programs\Python310\lib\site-packages\gradio\processing_utils.py", line 323, in create_tmp_copy_of_file
shutil.copy2(file_path, file_obj.name)
File "D:\programs\Python310\lib\shutil.py", line 434, in copy2
copyfile(src, dst, follow_symlinks=follow_symlinks)
File "D:\programs\Python310\lib\shutil.py", line 254, in copyfile
with open(src, 'rb') as fsrc:
FileNotFoundError: [Errno 2] No such file or directory: 'o'
The 'o' came from the timeout text "Could not connect..."
From what I understand about Gradio, the yielded or returned result is mapped onto the outputs, which here are output_textbox and output_file.
Since the yielded value is the single string timeout (the same applies when results is yielded), it gets unpacked character by character:
output_textbox = timeout[0] = 'C'
output_file = timeout[1] = 'o'
If you want to remove the error, you should change the yielded value to be compatible with the outputs.
For example:
yield timeout, ['switches_successful_results.txt', 'switches_failed_results.txt']
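In other words, every yield should supply one value per wired output component, here (Textbox text, File list). A minimal sketch of that shape, using the file names from the question (the surrounding request logic is elided):

def switch_ver(ip):
    files = ['switches_successful_results.txt', 'switches_failed_results.txt']
    for i in ip.split():
        # ... query the switch and write the result files as before ...
        results = 'Switch version for {} is: ...\n'.format(i)
        yield results, files   # (value for output_textbox, value for output_file)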
If you are using yield, you can iterate over the generator only once; it doesn't keep the data in memory for later passes. Check this out: https://stackoverflow.com/a/231855/17318894
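A tiny illustration of that point:

def gen():
    yield 1
    yield 2

g = gen()
print(list(g))   # [1, 2]
print(list(g))   # [] -- the generator is exhausted after the first pass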
I'm getting:
"unexpected character after line continuation character"
How should I write the line = line.strip("\xef\xbb\n\xbf") line without getting that error?
dataFile = open("data.txt","r")
updateFile = open("update","r")
newFile = open("newdata","w")
dataMatrix = []
updateMatrix = []
cardList = []
for line in dataFile:
    line = line.strip("\xef\xbb\n\xbf")
    tmp = line.split(" ")
    cardNum = tmp[0]
    cardName = " ".join(tmp[1:-2])
    cardDate = tmp[-2]
    cardSum = tmp[-1]
    dataMatrix.append([cardNum,cardName,cardDate,cardSum])
    cardList.append(cardNum)
i = 0
updateDate = ""
for line in updateFile:
    line = line.strip("\xef\xbb\n\xbf")
    if i==0 : updateDate = line; i=1; continue;
    tmp = line.split(" ")
    upNum = tmp[0]
    upName = " ".join(tmp[1:-1])
    upSum = tmp[-1]
    updateMatrix.append([upNum,upName,upSum])
for row in updateMatrix:
    if row[0] in cardList:
        index = cardList.index(row[0])
        plus = row[2]
        if plus[0] == "+":
            plus = int(plus[1:])
        else:
            plus = -int(plus[1:])
        curSum = int(dataMatrix[index][3])
        newSum = curSum+plus
        dataMatrix[index][3] = newSum
        dataMatrix[index][2] = updateDate
        # dataMatrix[index][]
    else:
        dataMatrix.append([row[0],row[1],updateDate,row[2][1:]])
dataMatrix.sort(key=lambda row: row[0])
for row in dataMatrix:
    print row
    newFile.write(" ".join(str(a) for a in row) + "\n")
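For what it's worth, "\xef\xbb\xbf" is the UTF-8 byte-order mark, and "unexpected character after line continuation character" usually means a backslash ended up outside the string quotes (for example from a wrapped line when pasting), so it is worth re-typing that strip() line by hand. A hedged alternative, assuming the files really are UTF-8 with a BOM, is to let the codec remove it:

import io

# 'utf-8-sig' strips a leading BOM automatically, so each line only needs
# its trailing newline removed before splitting.
with io.open("data.txt", "r", encoding="utf-8-sig") as dataFile:
    for line in dataFile:
        line = line.rstrip("\n")
        # ... parse the line exactly as in the original loop ...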
So I need to write a program that prompts for a file name, then opens that file and reads through it, looking for lines of the form: X-DSPAM-Confidence: 0.8475
I am stuck on summing the extracted values, counting the matching lines, and printing the result for the user.
out_number = 'X-DSPAM-Confidence: 0.8475'
Num = 0.0
flag = 0
fileList = list()
fname = input('Enter the file name')
try:
    fhand = open(fname)
except:
    print('file cannot be opened:', fname)
for line in fhand:
    fileList = line.split()
    print(fileList)
    for line in fileList:
        if flag == 0:
            pos = out_number.find(':')
            Num = out_number[pos + 2:]
            print(float(Num))
You have an example line in your code, and when you loop through each line in your file, you compute the number from your example line, not from the line that came from the file.
So, here's what I would do:
import os
import sys

fname = input('Enter the file name: ')
if not os.path.isfile(fname):
    print('file cannot be opened:', fname)
    sys.exit(1)

prefix = 'X-DSPAM-Confidence: '
numbers = []
with open(fname) as infile:
    for line in infile:
        if not line.startswith(prefix): continue
        num = float(line.split(":", 1)[1])
        print("found:", num)
        numbers.append(num)

# now, `numbers` contains all the floating point numbers from the file
average = sum(numbers) / len(numbers)
But we can make it more efficient:
import os
import sys

fname = input('Enter the file name: ')
if not os.path.isfile(fname):
    print('file cannot be opened:', fname)
    sys.exit(1)

prefix = 'X-DSPAM-Confidence: '
tot = 0
count = 0
with open(fname) as infile:
    for line in infile:
        if not line.startswith(prefix): continue
        num = float(line.split(":", 1)[1])  # convert before adding, otherwise tot += num raises TypeError
        tot += num
        count += 1
print("The average is:", tot / count)
Try this:
import re

pattern = re.compile(r"X-DSPAM-Confidence:\s(\d+\.\d+)")
sum = 0.0
count = 0
fPath = input("file path: ")
with open(fPath, 'r') as f:  # use the variable, not the literal string 'fPath'
    for line in f:
        match = pattern.match(line)
        if match is not None:
            lineValue = match.group(1)
            sum += float(lineValue)
            count += 1
print("The average is:", sum / count)
fname = input("Enter file name: ")
fh = open(fname)
count=0
x=0
for line in fh:
if not line.startswith("X-DSPAM-Confidence:") : continue
x=float(line.split(":")[1].rstrip())+x
count=count+1
output=x/count
print("Average spam confidence:",output)
I'm running a piece of freely available Python code used to detect CNVs in single-cell sequencing data:
#!/usr/bin/env python

import sys

def main():
    infilename = sys.argv[1]
    outfilename = sys.argv[2]
    statfilename = sys.argv[3]

    chrominfo = ("/path/hg19.chrom.sizes.txt", 0)
    bins = ("/path/hg19.bin.boundaries.50k.bowtie.k50.sorted.txt", 0)

    INFILE = open(infilename, "r")
    OUTFILE = open(outfilename, "w")
    STATFILE = open(statfilename, "w")

    binCounts = []
    for i in range(len(bins)):
        binCounts.append(0)

    print len(binCounts)
    print len(bins)

    counter = 0
    totalReads = 0
    prevChrompos = ""
    for x in INFILE:
        arow = x.rstrip().split("\t")
        thisChrom = arow[2]
        thisChrompos = arow[3]
        if thisChrom.find("_") > -1:
            #print thisChrom
            continue
        if thisChrom == "chrM":
            #print thisChrom
            continue
        if thisChrom == "":
            continue
        if chrominfo.has_key(thisChrom):
            pass
        else:
            continue
        totalReads += 1
        thisChrominfo = chrominfo[thisChrom]
        thisAbspos = long(thisChrompos) + long(thisChrominfo[2])
        counter += 1
        indexUp = len(bins) - 1
        indexDown = 0
        indexMid = int((indexUp - indexDown) / 2.0)
        while True:
            if thisAbspos >= long(bins[indexMid][2]):
                indexDown = indexMid + 0
                indexMid = int((indexUp - indexDown) / 2.0) + indexMid
            else:
                indexUp = indexMid + 0
                indexMid = int((indexUp - indexDown) / 2.0) + indexDown
            if indexUp - indexDown < 2:
                break
        binCounts[indexDown] += 1
        prevChrompos = thisChrompos

    for i in range(len(binCounts)):
        thisRatio = float(binCounts[i]) / (float(counter) / float(len(bins)))
        OUTFILE.write("\t".join(bins[i][0:3]))
        OUTFILE.write("\t")
        OUTFILE.write(str(binCounts[i]))
        OUTFILE.write("\t")
        OUTFILE.write(str(thisRatio))
        OUTFILE.write("\n")

    binCounts.sort()
    STATFILE.write("TotalReads\tMedianBinCount\n")
    STATFILE.write(str(totalReads))
    STATFILE.write("\t")
    STATFILE.write(str(binCounts[len(bins)/2]))
    STATFILE.write("\n")

    INFILE.close()
    OUTFILE.close()
    STATFILE.close()

def fileToDictionary(inputFile, indexColumn):
    input = open(inputFile, "r")
    rd = dict()
    # input.readline()
    for x in input:
        arow = x.rstrip().split("\t")
        id = arow[indexColumn]
        if rd.has_key(id):
            #rd[id].append(arow)
            print "duplicate knowngene id = " + id
            print "arow = " + str(arow)
            print "rd[id] = " + str(rd[id])
        else:
            rd[id] = arow
    input.close()
    return(rd)

def fileToArray(inputFile, skipFirst):
    input = open(inputFile, "r")
    ra = []
    for i in range(skipFirst):
        input.readline()
    for x in input:
        arow = x.rstrip().split("\t")
        ra.append(arow)
    input.close()
    return(ra)

if __name__ == "__main__":
    main()
I'm getting an error on line 40:
Traceback (most recent call last):
File "/path/varbin.50k.sam.py", line 129, in <module>
main()
File "/path/varbin.50k.sam.py", line 40, in main
**if chrominfo.has_key(thisChrom):
AttributeError: 'tuple' object has no attribute 'has_key'**
I don't work regularly in Python; can someone offer a suggestion?
Where do I begin?
Your code is expecting a dictionary and getting a tuple. I think you've missed a step: You need to change
chrominfo = ("/path/hg19.chrom.sizes.txt", 0)
to
chrominfo = fileToDictionary("/path/hg19.chrom.sizes.txt", 0)
Note also that if dict.has_key(key) has been deprecated (and removed in Python 3) in favour of if key in dict.
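Putting that together, the intended setup was presumably something like the lines below; the bins line is my assumption, based on the script indexing bins[i][0:3] and bins[indexMid][2] as rows of the boundaries file:

chrominfo = fileToDictionary("/path/hg19.chrom.sizes.txt", 0)
bins = fileToArray("/path/hg19.bin.boundaries.50k.bowtie.k50.sorted.txt", 0)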