How to create a plot in Python (Spyder)

I need to generate data and save it to a file in a directory; both are supposed to be created at run time, but a "File Not Found" error occurs.
I have some data which is created as shown below:
log = AnalyzeLog()
Then I need to save that data to a file with a .csv extension inside the directory. Both the directory and the file are supposed to be created at run time using the code below, but I have not been able to create either...
plot_data_path = "E:\\Malicious_TLS_Detection-master\\M_TLS_Detection\\dataset\\data_model"
dir_name = "dataset0"
for dir_name in normal_folder_path:
    path_to_single = normal_path + "\\" + dir_name
    __PrintManager__.single_folder_header(path_to_single)
    log.evaluate_features(path_to_single)
    __PrintManager__.succ_single_folder_header()
    log.create_plot_data(plot_data_path, dir_name)
def create_plot_data(self, path, filename):
    __PrintManager__.evaluate_creating_plot()
    self.create_dataset(path, filename)
    __PrintManager__.succ_evaluate_data()

def create_dataset(self, path, filename):
    index = 0
    ssl_flow = 0
    all_flow = 0
    malicious = 0
    normal = 0
    # file header: label + features
    header = [
        'label',
        'avg_domain_name_length',
        'std_domain_name_length',
        'avg_IPs_in_DNS']
    with open(
            path + "\\dataset-" + filename + ".csv", 'w+',
            newline='') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        for key in self.conn_tuple:
            label_feature = [
                str(self.conn_tuple[key].is_malicious()),
                str(self.conn_tuple[key].avg_domain_name_length()),
                str(self.conn_tuple[key].std_domain_name_length()),
                str(self.conn_tuple[key].avg_IPs_in_DNS())]
            writer.writerow(label_feature)
    print("<<< dataset file dataset-%s.csv successfully created !" % filename)
The code breaks at:
with open(
        path + "\\dataset-" + filename + ".csv", 'w+',
        newline='') as f:
where
path = "E:\\Malicious_TLS_Detection-master\\M_TLS_Detection\\dataset\\data_model"
filename = "dataset0"
The data should be written to the file in CSV format, but the following error arises:
"No such file or directory: 'E:\Malicious_TLS_Detection-master\M_TLS_Detection\dataset\data_model\dataset-dataset0.csv'"

Related

Permission error moving file after processing in a loop in Python

I have 3 JSON files that need to be parsed by Python.
file1.json
file2.json
file3.json
I have intentionally sabotaged the format in file3.json so it does not actually contain valid JSON.
my code:
import os, json, shutil

fileRoot = 'C:/root/python/'
inputFiles = fileRoot + 'input/'
processed_folder = fileRoot + 'processed/'
error_folder = fileRoot + 'error/'

print("processFiles")
print('inputfiles = ' + inputFiles)

if any(File.endswith(".json") for File in os.listdir(inputFiles)):
    json_files = [pos_json for pos_json in os.listdir(inputFiles) if pos_json.endswith('.json')]
    print('--------------------FILES IN DIRECTORY----------------------')
    print(json_files)
    print('--------------------FILE LOOPING----------------------------')
    for eachfile in json_files:
        print(eachfile)
        with open((inputFiles + eachfile), 'r') as f:
            try:
                data = json.load(f)
            except:
                shutil.move((inputFiles + eachfile), error_folder)
The idea is that if the JSON doesn't parse, the file should be moved to another folder called 'error'.
However, I keep getting errors such as:
PermissionError: [WinError 32] The process cannot access the file because it is being used by another process: 'C:/Python/input/file3.json' -> 'C:/root/Python/input/file3.json'
Why is this happening?
You are moving the file while it is still open: the with block only closes the file when it exits, and Windows will not let you move an open file.
As a work-around you can collect the files that you want to move in lists and move them after the loop:
move_to_error = []
move_to_valid = []

for eachfile in json_files:
    print(eachfile)
    with open((inputFiles + eachfile), 'r') as f:
        try:
            data = json.load(f)
            # if we have an exception in the previous line,
            # the file will not be appended to move_to_valid
            move_to_valid.append(eachfile)
        except:
            move_to_error.append(eachfile)

for eachfile in move_to_error:
    shutil.move((inputFiles + eachfile), error_folder)
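Another sketch of the same idea, assuming the json_files, inputFiles and error_folder variables from the question: do the move inside the loop, but only after the with block has closed the file, and catch json.JSONDecodeError instead of a bare except:

import json
import shutil

for eachfile in json_files:
    bad_json = False
    with open(inputFiles + eachfile, 'r') as f:
        try:
            json.load(f)
        except json.JSONDecodeError:
            bad_json = True
    # the file handle is closed here, so Windows allows the move
    if bad_json:
        shutil.move(inputFiles + eachfile, error_folder)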

Cannot merge various text files with similar names into one file

I have an issue where I iterate through a folder and would like to merge files containing a particular name. I have files like 1a_USR02.txt, 1b_USR02.txt and 1a_USR06.txt, 1b_USR06.txt. However, when I use the following code, the final file FINAL_USR02 or FINAL_USR06 contains only the second file (1b_USR02 or 1b_USR06). Any ideas?
import os
import shutil

cwd = os.getcwd()
directory = (cwd + '\\FINAL' + '\\')
delheadfiles = ['UST04', 'USR02', 'USR06', '1251', 'AGRS', 'TEXTS',
                'USERS', 'FLAGS', 'DEVACCESS', 'USERNAME', 'TSTC', 'TSTCT']

for delheadfile in delheadfiles:
    for file in os.listdir(directory):
        if file.endswith(delheadfile + ".txt"):
            table04 = (directory + 'FINAL_' + delheadfile + '.txt')
            with open(directory + file, 'rb') as readfile:
                if file.endswith(delheadfile + ".txt"):
                    with open(table04, 'wb') as outfile:
                        shutil.copyfileobj(readfile, outfile)
Try this:
import os

files_extensions = ['UST04', 'USR02', 'USR06']
folder_files = os.listdir()

for extension in files_extensions:
    with open('FINAL_' + extension + '.txt', 'a+') as out_file:
        for item in folder_files:
            if item.endswith(extension + '.txt'):
                with open(item, 'r') as in_file:
                    out_file.write(in_file.read())
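The reason the original version kept only the second file is that it reopened the FINAL_ file in 'wb' mode inside the inner loop, truncating it for every match; 'a+' appends instead. A hedged variant that keeps write mode but opens each output only once, so truncation happens before anything is copied:

import os
import shutil

files_extensions = ['UST04', 'USR02', 'USR06']
# snapshot the listing first so the FINAL_ files we create are not picked up
folder_files = [f for f in os.listdir() if not f.startswith('FINAL_')]

for extension in files_extensions:
    # open the combined file once per extension, so 'wb' truncates it only once
    with open('FINAL_' + extension + '.txt', 'wb') as out_file:
        for item in folder_files:
            if item.endswith(extension + '.txt'):
                with open(item, 'rb') as in_file:
                    shutil.copyfileobj(in_file, out_file)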

How to save all files without replacing the file?

I have 100 text files and I want to save the results into 100 text files too. Right now, my code can read all the files, but it saves only one file, which holds the latest result. Here is the code:
def nama():
    path = "C:/Amar/code/"
    infilename = os.listdir(path)
    print len(infilename)
    for filename in infilename:
        print("jumpa dah" + path + "\\" + filename)
        f = open(path + "\\" + filename, "r")
        data = f.read()
        f.close()
        lines = data.split('\n')
        outfilename = path + "result.txt"
        print outfilename
        f = open(outfilename, "a")
Append a string that will act as a unique identifier for each output file. You can use the input filename for this:
outfilename = path + filename + "_result.txt"
# e.g. reports_result.txt
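Put together, a minimal sketch of the whole loop under that naming scheme (the .txt filter and the _result suffix are illustrative assumptions), writing one result file per input file:

import os

path = "C:/Amar/code/"
for filename in os.listdir(path):
    if not filename.endswith(".txt") or filename.endswith("_result.txt"):
        continue  # skip non-text files and results from a previous run
    with open(os.path.join(path, filename), "r") as f:
        data = f.read()
    # one output file per input, named after the input file
    base = os.path.splitext(filename)[0]
    with open(os.path.join(path, base + "_result.txt"), "w") as out:
        out.write(data)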

How to rename files using a text file in Python

I need to rename a bunch of files in a folder using new names taken from a text file. Can you please give an example of this?
My new names in a text file:
1BA
1BB
1BC
1BD
1BE
1BF
1C0
1C1
1C2
1C3
Like this.
Updated Code:
import csv
import os

with open('names.txt') as f2:
    filedata = f2.read().split(",")
    os.rename(filedata[0].strip(), filedata[1].strip())
f2.close()
f2 = open('Lines.txt', 'w')
f2.write(filedata)
f2.close()
What about using a CSV (comma-separated) file for input in the format oldPath,newPath and doing the following:
import csv
import os

with open('names.csv') as csvfile:
    reader = csv.reader(csvfile)
    for row in reader:
        oldPath = row[0]
        newPath = row[1]
        os.rename(oldPath, newPath)
Alternatively, if you want to move the file to another directory/filesystem, you can have a look at shutil.move.
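For example, with a hypothetical names.csv holding one oldPath,newPath pair per line, the same loop works with shutil.move when the destination sits in another directory or filesystem:

# names.csv (hypothetical example):
#   old_reports/jan.txt,archive/2023/jan.txt
#   old_reports/feb.txt,archive/2023/feb.txt
import csv
import shutil

with open('names.csv', newline='') as csvfile:
    for row in csv.reader(csvfile):
        if len(row) == 2:  # skip blank or malformed lines
            old_path, new_path = row
            # unlike os.rename, shutil.move also works across filesystems
            shutil.move(old_path, new_path)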
# Create old.txt and rename.txt
# Do not include file paths inside the txt files
# On each line, write a filename including the extension
from pathlib import Path
import os
import sys

print("enter path of folder")
path = input()

oldList = []
with open(path + "\\old.txt", "r", encoding="utf8", errors='ignore') as readtxt:
    for f in readtxt:
        fname = f.rstrip()
        oldList.append(fname)

# i is the index into oldList
i = 0
newList = []
with open(path + "\\rename.txt", "r", encoding="utf8", errors='ignore') as readtxt:
    for f in readtxt:
        newName = f.rstrip()
        os.rename(path + "\\" + oldList[i], path + "\\" + newName)
        i += 1
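A slightly tighter sketch of the same idea, assuming the old.txt/rename.txt layout described in the comments above, pairs the two lists with zip instead of keeping a manual index:

import os

folder = input("enter path of folder: ")

# read old and new names side by side and rename pairwise
with open(os.path.join(folder, "old.txt"), encoding="utf8") as old_f, \
     open(os.path.join(folder, "rename.txt"), encoding="utf8") as new_f:
    for old_name, new_name in zip(old_f, new_f):
        os.rename(os.path.join(folder, old_name.rstrip()),
                  os.path.join(folder, new_name.rstrip()))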

How to output to a different directory?

I have this:
from os import path

base_path = "C:\\texts\\*.txt"
for file in files:
    with open(file) as in_file, open(path.join(base_path, "%s_tokenized.txt" % file), "w") as out_file:
        data = in_file.readlines()
        for line in data:
            words = line.split()
            str1 = ','.join(words)
            out_file.write(str1)
            out_file.write("\n")
It produces tokenized files in the same directory it reads from. How can I write those out_files to a different directory such as "C:\\texts\\Tokenized"?
I know there are ways to move the new files to another directory after producing them, but what I want to know is whether there is a way to write the new files to another directory at the same time they are produced in the code above.
Is this what you're looking for?
import os
import glob

source_pattern = 'c:/texts/*.txt'
output_directory = 'c:/texts/tokenized'

# Iterate over files matching source_pattern
for input_file in glob.glob(source_pattern):
    # build the output filename
    base, ext = os.path.splitext(os.path.basename(input_file))
    output_file = os.path.join(output_directory, base + '_tokenized' + ext)
    with open(input_file) as in_file, open(output_file, 'w') as out_file:
        for line in in_file:
            out_file.write(','.join(line.split()) + '\n')
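One caveat: open(output_file, 'w') raises FileNotFoundError if c:/texts/tokenized does not exist yet, so it may help to create the output directory once before the loop (a small sketch using os.makedirs from the standard library):

import os

output_directory = 'c:/texts/tokenized'
# open(..., 'w') cannot create missing folders,
# so create the output directory (and any missing parents) up front
os.makedirs(output_directory, exist_ok=True)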
This is how I output to files in arbitrary directories:
dir_name = "../some_dir"
if not os.path.exists(dir_name):
    os.makedirs(dir_name)
out_file_name = dir_name + '/out.txt'
out_file = open(out_file_name, 'w')
EDIT:
file_name = "{0}_tokenized.txt".format(something_from_tokenizing)
if not os.path.exists(dir_name):
    os.makedirs(dir_name)
out_file_name = dir_name + '/' + file_name
EDIT:
I just tried it, and it worked for me. You simply need two paths, one for the source directory and one for the destination. Hope this helps.
import os
from os import path

f1 = open("in.txt")
f2 = open("out.txt")
files = ["in.txt", "out.txt"]
base_path = "."
dir_name = "./some_dir"
if not os.path.exists(dir_name):
    os.makedirs(dir_name)
for file in files:
    with open(file) as in_file, open(path.join(dir_name, "%s_tokenized.txt" % file), "w") as out_file:
        data = in_file.readlines()
        for line in data:
            words = line.split()
            str1 = ','.join(words)
            out_file.write(str1)
            out_file.write("\n")
