I have several sub-folders, each containing zipped Twitter files. I want Python to iterate through these sub-folders and turn the files into regular JSON files.
I have more than 300 sub-folders, each containing about 1,000 or more of these zipped files.
A sample of these files is named:
00_activities.json.gz%3FAWSAccessKeyId=AKIAJADH5KHBJMUZOPEA&Expires=1404665927&Signature=%2BdCn%252Ffn%2BFfRQhknWWcH%2BtnwlSfk%3D
I have tried the code below, just to see if I could extract one of those files, but none of the attempts worked.
import zipfile
zip_ref = zipfile.ZipFile('E:/echoverse/Subdivided Tweets/Subdivided Tweets/Tweets-0', 'r')
zip_ref.extractall('E:/echoverse/Subdivided Tweets/Subdivided Tweets/Tweets-0/00_activities.json.gz%3FAWSAccessKeyId=AKIAJADH5KHBJMUZOPEA&Expires=1404665927&Signature=%2BdCn%252Ffn%2BFfRQhknWWcH%2BtnwlSfk%3D')
zip_ref.close()
I have also tried:
import tarfile
tar = tarfile.open('E:/echoverse/Subdivided Tweets/Subdivided Tweets/Tweets-0/00_activities.json.gz%3FAWSAccessKeyId=AKIAJADH5KHBJMUZOPEA&Expires=1404665927&Signature=%2BdCn%252Ffn%2BFfRQhknWWcH%2BtnwlSfk%3D')
tar.extractall()
tar.close()
here is my third try (and no luck):
import gzip
import json
with gzip.open('E:/echoverse/Subdivided Tweets/Subdivided Tweets/Tweets-0/00_activities.json.gz%3FAWSAccessKeyId=AKIAJADH5KHBJMUZOPEA&Expires=1404665927&Signature=%2BdCn%252Ffn%2BFfRQhknWWcH%2BtnwlSfk%3D', 'rb') as f:
    d = json.loads(f.read().decode("utf-8"))
There is another very similar thread on Stack Overflow, but my question is different in that my zipped files are originally JSON, and when I use this last method I get this error:
Exception has occurred: json.decoder.JSONDecodeError
Expecting value: line 1 column 1 (char 0)
Here is a simple script that answers the question: it walks the tree, checks whether each file (fname) is a gzip (via the magic number, because I'm cynical about extensions) and unzips it.
import json
import gzip
import binascii
import os

def is_gz_file(filepath):
    # check the gzip magic number (1f 8b) rather than trusting the extension
    with open(filepath, 'rb') as test_f:
        return binascii.hexlify(test_f.read(2)) == b'1f8b'

rootDir = '.'
for dirName, subdirList, fileList in os.walk(rootDir):
    for fname in fileList:
        filepath = os.path.join(dirName, fname)
        if is_gz_file(filepath):
            with gzip.open(filepath, 'rb') as f:
                json_content = json.loads(f.read())
            print(json_content)
Tested and it works.
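If you also need to write each decompressed payload back out as a regular .json file (which is what the question ultimately asks for), here is a minimal sketch building on the is_gz_file helper above. The name handling is an assumption based on the sample filename, which looks like a URL-encoded S3 key with a query string attached:
import gzip
import os
import urllib.parse

rootDir = '.'
for dirName, subdirList, fileList in os.walk(rootDir):
    for fname in fileList:
        filepath = os.path.join(dirName, fname)
        if not is_gz_file(filepath):
            continue
        # Assumption: decode the URL-encoded name and cut at the '?' so
        # "00_activities.json.gz%3FAWSAccessKeyId=..." becomes
        # "00_activities.json.gz", then drop the ".gz" suffix.
        clean = urllib.parse.unquote(fname).split('?', 1)[0]
        if clean.endswith('.gz'):
            clean = clean[:-3]
        with gzip.open(filepath, 'rb') as f_in:
            with open(os.path.join(dirName, clean), 'wb') as f_out:
                f_out.write(f_in.read())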
Related
I am currently trying to create a loop that goes through a folder, converts every file from .zst to JSON, and puts it in a new folder. Once it gets to the second file in the directory I hit an error saying the file does not exist in the directory, even though it is there. All the files have the same name pattern and are numbered from 00000 to 01138.
import os
import zstandard
import pathlib
import json
directory = os.fsencode("D:\data")

for file in os.listdir(directory):
    file_name = os.fsdecode(file)
    input_file = pathlib.Path(file_name)
    if filename.endswith(".zst"):
        with open(input_file, 'rb') as compressed:
            decomp = zstandard.ZstdDecompressor()
            output_path = pathlib.Path("D:\New\Folder") / input_file.stem
            with open(output_path, 'wb') as destination:
                decomp.copy_stream(compressed, destination)
        continue
This is my current code; I am still trying to figure out how to have it output JSON instead of a bare file. Any guidance would be greatly appreciated.
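A likely fix (a sketch, untested against the original data): the loop never joins the file names back to the source directory, so the second open() looks in the current working directory and fails; it also tests an undefined filename instead of file_name, and the non-raw "D:\New\Folder" string will trip over backslash escapes. Something along these lines should behave:
import pathlib
import zstandard

src_dir = pathlib.Path(r"D:\data")        # raw strings keep the backslashes literal
dst_dir = pathlib.Path(r"D:\New\Folder")  # hypothetical destination from the question
dst_dir.mkdir(parents=True, exist_ok=True)

for input_file in src_dir.iterdir():      # iterdir() yields full paths
    if input_file.suffix != ".zst":
        continue
    # "00000.json.zst" -> "00000.json": .stem drops only the final suffix
    output_path = dst_dir / input_file.stem
    with open(input_file, 'rb') as compressed, open(output_path, 'wb') as destination:
        zstandard.ZstdDecompressor().copy_stream(compressed, destination)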
I have this password protected zip folder:
folder_1\1.zip
When I extract this it gives me
1\image.png
How can I extract this to another folder without its folder name? Just the contents of it: image.png
So far I have tried all the Stack Overflow solutions I could find, and it has taken me 11 hours straight without solving this.
import zipfile

zip = zipfile.ZipFile('C:\\Users\\Desktop\\folder_1\\1.zip', 'r')
zip.setpassword(b"virus")
zip.extractall('C:\\Users\\Desktop')  # target dir to extract all contents
zip.close()
EDIT:
This code worked for me. (Now I want many paths to be extracted at once; any ideas?)
import os
import shutil
import zipfile

my_dir = r"C:\Users\Desktop"
my_zip = r"C:\Users\Desktop\test\folder_1\1.zip"

with zipfile.ZipFile(my_zip) as zip_file:
    zip_file.setpassword(b"virus")
    for member in zip_file.namelist():
        filename = os.path.basename(member)
        # skip directories
        if not filename:
            continue
        # copy file (taken from zipfile's extract)
        source = zip_file.open(member)
        target = open(os.path.join(my_dir, filename), "wb")  # open(), not Python 2's file()
        with source, target:
            shutil.copyfileobj(source, target)
You can use the ZipFile.read() method to read the specific file in the archive, open your target file for writing by joining the target directory with the base name of the source file, and then write what you read to it:
import zipfile
import os

zip = zipfile.ZipFile('C:\\Users\\Desktop\\folder_1\\1.zip', 'r')
zip.setpassword(b"virus")
for name in zip.namelist():
    if not name.endswith(('/', '\\')):
        with open(os.path.join('C:\\Users\\Desktop', os.path.basename(name)), 'wb') as f:
            f.write(zip.read(name))
zip.close()
And if you have several paths containing 1.zip for extraction:
import zipfile
import os

for path in ('C:\\Users\\Desktop\\folder_1', 'C:\\Users\\Desktop\\folder_2', 'C:\\Users\\Desktop\\folder_3'):
    zip = zipfile.ZipFile(os.path.join(path, '1.zip'), 'r')
    zip.setpassword(b"virus")
    for name in zip.namelist():
        if not name.endswith(('/', '\\')):
            with open(os.path.join('C:\\Users\\Desktop', os.path.basename(name)), 'wb') as f:
                f.write(zip.read(name))
    zip.close()
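If the folders follow a naming pattern rather than a fixed list, a glob-based variant can discover every 1.zip for you. This is a sketch; the folder_* pattern is an assumption based on the question's folder_1:
import glob
import os
import zipfile

# Assumption: every folder matching folder_* on the Desktop holds a 1.zip
for zip_path in glob.glob('C:\\Users\\Desktop\\folder_*\\1.zip'):
    with zipfile.ZipFile(zip_path) as zf:
        zf.setpassword(b"virus")
        for name in zf.namelist():
            if not name.endswith(('/', '\\')):
                with open(os.path.join('C:\\Users\\Desktop', os.path.basename(name)), 'wb') as f:
                    f.write(zf.read(name))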
I have to compress a lot of XML files and split them by the date in the file name. For clarification's sake: a parser collects information from each XML file and then moves it to a backup folder. My code needs to gzip the files according to the date in the filename and group them in a compressed .gz file.
Please find the code below:
import os
import re
import gzip
import shutil
import sys
import time
#
timestr = time.strftime("%Y%m%d%H%M")
logfile = 'D:\\Coleta\\log_compactador_xml_tar' + timestr + '.log'
ptm_dir = "D:\\PTM\\monitored_programs\\"
count_files_mdc = 0
count_files_3gpp = 0
count_tar = 0
#
for subdir, dir, files in os.walk(ptm_dir):
    for file in files:
        path = os.path.join(subdir, file)
        try:
            backup_files_dir = path.split(sep='\\')[4]
            parser_id = path.split(sep='\\')[3]
            if re.match('backup_files_*', backup_files_dir):
                if file.endswith('xml'):
                    # print(time.strftime("%Y-%m-%d %H:%M:%S"), path)
                    data_arq = file[1:14]
                    if parser_id in ('parser-924'):
                        gzip_filename_mdc = os.path.join(subdir, 'E4G_PM_MDC_IP51_' + timestr + '_' + data_arq)
                        with open(path, 'r') as f_in, gzip.open(gzip_filename_mdc + ".gz", 'at') as f_out_mdc:
                            shutil.copyfileobj(f_in, f_out_mdc)
                            count_files_mdc += 1
                        print(time.strftime("%Y-%m-%d %H:%M:%S"), "Compressing file MDC: ", path)
                        os.remove(path)
        except PermissionError:
            # path, not the undefined fullpath; note also that print's file=
            # expects an open file object, while logfile here is a path string
            print(time.strftime("%Y-%m-%d %H:%M:%S"), "Permission error on file:", path, file=logfile)
        except IndexError:
            print(time.strftime("%Y-%m-%d %H:%M:%S"), "IndexError: ", path, file=logfile)
As far as I can see, it creates a stream of data, then compresses it and writes it to a new file with the specified filename. However, instead of keeping each XML file independent inside the .gz file, it creates inside the gzip file one big file (one big stream of data?) with the same name as the output gzip file, but without any extension. After the files are fully compressed, it is not possible to uncompress the big file generated inside the gzip output file. Does someone know where the problem in my code is?
PS: I have edited the code for readability purposes.
Not sure whether the solution is still needed, but I will just leave it here for anyone who faces the same issue.
What you see is expected behaviour: gzip compresses a single stream and has no notion of named members, so appending several files to one .gz simply concatenates them into one blob. To keep the files separate you need an archive format around the compression, which is what tar provides.
There is a way to create a gzip archive in Python using tarfile; the code is quite simple:
with tarfile.open(filename, mode="w:gz") as archive:
    archive.add(name=name_of_file_to_add, recursive=True)
In this case name_of_file_to_add can be a directory, in which case tarfile will add it recursively with all its contents. Obviously you will need to import the tarfile module.
If you need to add files without a directory, a simple for loop with calls to add() will do (the recursive flag is not required in this case), as sketched below.
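A minimal sketch of that loop, applied to the question's per-date grouping; xml_files and archive_path are hypothetical placeholders for one date group:
import os
import tarfile

# Hypothetical placeholders: the XML files that share one date (data_arq)
# and a per-date archive name built the same way as in the question.
xml_files = ['D:\\PTM\\a.xml', 'D:\\PTM\\b.xml']
archive_path = 'E4G_PM_MDC_IP51_202401010000_example.tar.gz'

with tarfile.open(archive_path, mode="w:gz") as archive:
    for xml_path in xml_files:
        # arcname drops the directory part so the archive holds bare file names
        archive.add(name=xml_path, arcname=os.path.basename(xml_path))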
I have a directory /directory/some_directory/ and in that directory I have a set of files. Those files are named in the following format: <letter>-<number>_<date>-<time>_<dataidentifier>.log, for example:
ABC1-123_20162005-171738_somestring.log
DE-456_20162005-171738_somestring.log
ABC1-123_20162005-153416_somestring.log
FG-1098_20162005-171738_somestring.log
ABC1-123_20162005-031738_somestring.log
DE-456_20162005-171738_somestring.log
I would like to read a subset of those files (for example, only files named ABC1-123*.log) and export all their contents to a single CSV file (for example, output.csv), that is, a CSV file that will hold the data from all the individual files collectively.
The code that I have written so far:
#!/usr/bin/env python
import os

file_directory = os.getcwd()
m_class = "ABC1"
m_id = "123"
device = m_class + "-" + m_id

for data_file in sorted(os.listdir(file_dir)):
    if str(device) + "*" in os.listdir(file_dir):
        print data_file
I don't know how to read only a subset of filtered files, nor how to export them to a common CSV file.
How can I achieve this?
Just use the re lib to match the file name pattern, and the csv lib to export.
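A brief sketch of that suggestion (assumptions: each matched log line becomes one row in the combined CSV, and the source-filename column is my addition, not something the question asked for):
import csv
import os
import re

pattern = re.compile(r'^ABC1-123_.*\.log$')  # the subset named in the question

with open('output.csv', 'w', newline='') as out:
    writer = csv.writer(out)
    for fname in sorted(os.listdir('.')):
        if not pattern.match(fname):
            continue
        with open(fname) as log_file:
            for line in log_file:
                # tag each row with its source file, then the line content
                writer.writerow([fname, line.rstrip('\n')])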
Only a few adjustments are needed; you were close:
filesFromDir = os.listdir(os.getcwd())
fileList = [file for file in filesFromDir if file.startswith(device)]

f = open("LogOutput.csv", "ab")
for file in fileList:
    # print "Processing", file
    with open(file, "rb") as log_file:
        txt = log_file.read()
        f.write(txt)
        f.write(b"\n")  # bytes, to match the binary-mode output file
f.close()
Your question could be better stated; based on your current code snippet, I'll assume that you want to:
1. Filter files in a directory based on a glob pattern.
2. Concatenate their contents to a file named output.csv.
In python you can achieve (1.) by using glob to list filenames.
import glob

for filename in glob.glob('foo*bar'):
    print(filename)
That would print all files starting with foo and ending with bar in
the current directory.
For (2.) you just read the file and write its content to your desired
output, using python's open() builtin function:
open('filename', 'r')
(Using 'r' as the mode you are asking python to open the file for
"reading", using 'w' you are asking python to open the file for
"writing".)
The final code would look like the following:
import glob

device = 'ABC1-123'

with open('output.csv', 'w') as output:
    for filename in glob.glob(device + '*'):
        with open(filename, 'r') as input:
            output.write(input.read())
You can use the os module to list the files.
import os

files = os.listdir(os.getcwd())
m_class = "ABC1"
m_id = "123"
device = m_class + "-" + m_id
file_extension = ".log"

# filter the files by their extension and the starting name
files = [x for x in files if x.startswith(device) and x.endswith(file_extension)]

f = open("output.csv", "a")
for file in files:
    with open(file, "r") as data_file:
        f.write(data_file.read())
    f.write(",\n")
f.close()
I'm attempting to remove a zipped file after unzipping its contents on Windows. The contents can be stored in a folder structure inside the zip. I'm using the with statement and thought this would close the file-like object (the source variable) and the zip file. I've removed the lines of code relating to saving the source file.
import zipfile
import os

zipped_file = r'D:\test.zip'

with zipfile.ZipFile(zipped_file) as zip_file:
    for member in zip_file.namelist():
        filename = os.path.basename(member)
        if not filename:
            continue
        source = zip_file.open(member)

os.remove(zipped_file)
The error returned is:
WindowsError: [Error 32] The process cannot access the file because it is being used by another process: 'D:\\test.zip'
I've tried:
- looping over the os.remove line in case it's a slight timing issue
- using close explicitly instead of the with statement
- attempting it on the local C drive and a mapped D drive
Instead of passing a string to the ZipFile constructor, you can pass it a file-like object:
import zipfile
import os

zipped_file = r'D:\test.zip'

with open(zipped_file, mode="rb") as file:  # binary mode; ZipFile needs a binary stream
    zip_file = zipfile.ZipFile(file)
    for member in zip_file.namelist():
        filename = os.path.basename(member)
        if not filename:
            continue
        source = zip_file.open(member)

os.remove(zipped_file)
You are opening files inside the zip, which creates a file lock on the whole zip file. Close the inner file first, via source.close() at the end of your loop:
import zipfile
import os

zipped_file = r'D:\test.zip'

with zipfile.ZipFile(zipped_file) as zip_file:
    for member in zip_file.namelist():
        filename = os.path.basename(member)
        if not filename:
            continue
        source = zip_file.open(member)
        source.close()

os.remove(zipped_file)
Try to close the zipfile before removing.
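For example, a minimal sketch of that suggestion (the extraction destination is hypothetical):
import os
import zipfile

zipped_file = r'D:\test.zip'

zip_file = zipfile.ZipFile(zipped_file)
zip_file.extractall('D:\\extracted')  # hypothetical destination folder
zip_file.close()                      # release the handle before deleting
os.remove(zipped_file)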
You can also do it like this, which works pretty well:
import os, shutil, zipfile

fpath = 'C:/Users/dest_folder'
path = os.getcwd()

for file in os.listdir(path):
    if file.endswith(".zip"):
        dirs = os.path.join(path, file)
        if os.path.exists(fpath):
            shutil.rmtree(fpath)
        _ = os.mkdir(fpath)
        with open(dirs, 'rb') as fileobj:
            z = zipfile.ZipFile(fileobj)
            z.extractall(fpath)
            z.close()
        os.remove(dirs)