With Python, how can I loop through folders and copy out subfolders with the same name from every folder, saving them in a single folder with that name? So A in test 1 and A in test 2 should both be copied to a folder called A, and the same should happen for the rest.
I was able to loop through the main directory and get the subdirectories (test 1 and test 2) using os.walk.
Are you looking for something like this?
import os, shutil

directory = "/home/.../test1"
dest_dir = "/home/.../test2"

# collect every file under `directory`, then copy each one into `dest_dir`
filelist = []
for root, dirs, files in os.walk(directory):
    for file in files:
        filelist.append(os.path.join(root, file))

for subFile in filelist:
    shutil.copy(subFile, dest_dir)
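That flattens everything into one destination, though. If the goal is to merge same-named subfolders (A in test 1, A in test 2, and so on) into one folder per name, here is a minimal sketch; src_root and dest_root are placeholder paths, and shutil.copytree(..., dirs_exist_ok=True) needs Python 3.8 or newer:
import os
import shutil

src_root = "/home/.../source"   # placeholder: contains "test 1", "test 2", ...
dest_root = "/home/.../merged"  # placeholder: one folder per subfolder name ends up here

for test_folder in os.listdir(src_root):
    test_path = os.path.join(src_root, test_folder)
    if not os.path.isdir(test_path):
        continue
    for sub in os.listdir(test_path):            # e.g. "A", "B", ...
        sub_path = os.path.join(test_path, sub)
        if not os.path.isdir(sub_path):
            continue
        # merge every subfolder called `sub` into dest_root/sub
        target = os.path.join(dest_root, sub)
        shutil.copytree(sub_path, target, dirs_exist_ok=True)
Files that share a name across the source folders will overwrite each other in the merged folder, so rename them first if that matters.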
I have the following structure:
Folder1
ZZ-20201201-XX.zip
Folder2
XX-20201201-XX.zip
XX-20201202-XX.zip
Folder3
YY-20201201-XX.zip
YY-20201202-XX.zip
With the code below I'm creating a counterpart of Folder1, Folder2 and Folder3 and directly unzipping the zipped files into those 3 folders. So I receive this:
Folder1
ZZ-.txt
Folder2
XX-.txt
Folder3
YY.txt
As you can see, the files lose the date once they are unzipped, so if a folder contains 2 zipped files they get the same name and one overwrites the other. Now I want to add the date of the zipped files to the file names once they are unzipped. How can I do this?
import fnmatch
import os
import re
import zipfile

# my_files (source root) and counter_part (destination root) are defined elsewhere
pattern = '*.zip'
for root, dirs, files in os.walk(my_files):
    for filename in fnmatch.filter(files, pattern):
        path = os.path.join(root, filename)
        # the date of the zipped file; I want this included in the names of the unzipped files
        date_zipped_file = re.search(r'-(.\d+)-', filename).group(1)
        # Store the new directory so that it can be recreated
        new_dir = os.path.normpath(os.path.join(os.path.relpath(path, start=my_files), ".."))
        # Join your target directory with the newly created directory
        new = os.path.join(counter_part, new_dir)
        # Create those folders, works even with nested folders
        if not os.path.exists(new):
            os.makedirs(new)
        zipfile.ZipFile(path).extractall(new)
my desired outcome:
Folder1
ZZ-20201201.txt
Folder2
XX-20201201.txt
XX-20201202.txt
Folder3
YY-20201201.txt
YY-20201202.txt
You could just rename the files after you have unzipped each folder. Something like this:
# get all files in that unzipped folder
files = os.listdir(path)
# rename all files in that dir
for file in files:
    filesplit = os.path.splitext(os.path.basename(file))
    os.rename(os.path.join(path, file), os.path.join(path, filesplit[0] + '_' + date_zipped_file + filesplit[1]))
But that also renames files which might already have a date in the name. So you would also need a check for whether the file was already renamed, either by maintaining a list of file names or by using a simple regex that looks for an 8-digit string between a '_' and a '.', e.g. text_20201207.txt:
# get all files in that unzipped folder
files = os.listdir(path)
# rename all files in that dir, skipping ones that already carry a date
for file in files:
    filesplit = os.path.splitext(os.path.basename(file))
    if not re.search(r'_\d{8}\.', file):
        os.rename(os.path.join(path, file), os.path.join(path, filesplit[0] + '_' + date_zipped_file + filesplit[1]))
Your final solution would then look something like this:
import fnmatch
import os
import re
import zipfile

pattern = '*.zip'
for root, dirs, files in os.walk(my_files):
    for filename in fnmatch.filter(files, pattern):
        path = os.path.join(root, filename)
        # the date of the zipped file; appended to the unzipped file names below
        date_zipped_file = re.search(r'-(.\d+)-', filename).group(1)
        # Store the new directory so that it can be recreated
        new_dir = os.path.normpath(os.path.join(os.path.relpath(path, start=my_files), ".."))
        # Join your target directory with the newly created directory
        new = os.path.join(counter_part, new_dir)
        # Create those folders, works even with nested folders
        if not os.path.exists(new):
            os.makedirs(new)
        zipfile.ZipFile(path).extractall(new)
        # get all files in that unzipped folder (new name so the os.walk variable `files` is not shadowed)
        unzipped_files = os.listdir(new)
        # rename all files in that dir, skipping ones that already carry a date
        for file in unzipped_files:
            filesplit = os.path.splitext(os.path.basename(file))
            if not re.search(r'_\d{8}\.', file):
                os.rename(os.path.join(new, file), os.path.join(new, filesplit[0] + '_' + date_zipped_file + filesplit[1]))
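If you would rather skip the separate rename pass, a possible variation is to write each archive member straight to a dated file name while extracting. This is only a sketch under the same assumptions (my_files, counter_part, and a -YYYYMMDD- date in the zip names), not the code above:
import fnmatch
import os
import re
import shutil
import zipfile

for root, dirs, files in os.walk(my_files):
    for filename in fnmatch.filter(files, '*.zip'):
        path = os.path.join(root, filename)
        date_zipped_file = re.search(r'-(\d{8})-', filename).group(1)
        new = os.path.join(counter_part, os.path.normpath(os.path.join(os.path.relpath(path, start=my_files), "..")))
        os.makedirs(new, exist_ok=True)
        with zipfile.ZipFile(path) as archive:
            for member in archive.namelist():
                if member.endswith('/'):        # skip directory entries
                    continue
                stem, ext = os.path.splitext(os.path.basename(member))
                target = os.path.join(new, stem + '_' + date_zipped_file + ext)
                # stream the member's bytes straight into the dated target file
                with archive.open(member) as src, open(target, 'wb') as dst:
                    shutil.copyfileobj(src, dst)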
I have ~1000 directories, containing various .csv files within them. I am trying to check if a specific type of csv file, containing a filename that begins with PTSD_OCOTBER, exists in each directory.
If this file does not exist in the directory, I want to print out that directory into a .txt file.
Here is what I have so far.
import os, sys, time, shutil
import subprocess

# determine filetype to look for
file_type = ".csv"
print("Running file counter for " + repr(file_type))

# for each folder in the root directory (rootdir is defined elsewhere)
for subdir, dirs, files in os.walk(rootdir):
    if "GeneSet" in subdir:
        folder_name = subdir.rsplit('/', 1)[-1]  # get the folder name
        for f in files:
            # unclear how to write this part.
            # how to tell if no files exist in directory?
            pass
This successfully finds the .csv files of interest, but how do I achieve the above?
So files is the list of files in that directory that you are currently walking. You want to know if there are no files that start with PTSD_OCOTBER (PTSD_OCTOBER ?):
for subdir, dirs, files in os.walk(rootdir):
    if "GeneSet" in subdir:
        folder_name = subdir.rsplit('/', 1)[-1]  # get the folder name
        dir_of_interest = not any(f.startswith('PTSD_OCOTBER') for f in files)
        if dir_of_interest:
            # do stuff with folder_name
Now you want to save the results into a text file? If you have a Unix-style computer, then you can use output redirection on your terminal, such as
python3 fileanalysis.py > result.txt
after writing print(folder_name) instead of # do stuff with folder_name.
Or you can use Python itself to write the file, such as:
found_dirs = []
for subdir, dirs, files in os.walk(rootdir):
    ...
    if dir_of_interest:
        found_dirs.append(folder_name)

with open('result.txt', 'w') as f:
    f.write('\n'.join(found_dirs))
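Putting the pieces together, the whole check could look something like this sketch (rootdir is a placeholder for your actual root directory; the "GeneSet" filter and the PTSD_OCOTBER prefix come from the question):
import os

rootdir = "/path/to/root"  # placeholder

found_dirs = []
for subdir, dirs, files in os.walk(rootdir):
    if "GeneSet" in subdir:
        folder_name = subdir.rsplit('/', 1)[-1]
        # the directory is reported if none of its files start with the expected prefix
        if not any(f.startswith('PTSD_OCOTBER') for f in files):
            found_dirs.append(folder_name)

with open('result.txt', 'w') as f:
    f.write('\n'.join(found_dirs))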
How can I list all folders of a directory in Python and remove them?
for root, dirs, files in os.walk(r'/home/m110/public_html/ts/'):
    print(root)
    print(dirs)
    print(files)
I run this code on CentOS 7, but I just need the list of folders so that I can delete them.
import os
import shutil

base = r'/home/m110/public_html/ts/'
# next(os.walk(base))[1] is the list of top-level folder names under base
dirs = next(os.walk(base))[1]
for name in dirs:
    print(name)
    shutil.rmtree(os.path.join(base, name))  # join with base so the path is valid from any working directory
I'm having trouble finding and deleting empty folders with my Python script.
I have some directories with files more or less like this:
A/
--B/
----a.txt
----b.pdf
--C/
----d.pdf
I'm trying to delete all files which aren't PDFs and after that delete all empty folders. I can delete the files I want to, but then I can't get rid of the empty directories. What am I doing wrong?
os.chdir(path + "/" + name + "/Test Data/Checklists")
pprint("Current path: " + os.getcwd())
for root, dirs, files in os.walk(path + "/" + name + "/Test Data/Checklists"):
    for name in files:
        if not name.endswith(".pdf"):
            os.remove(os.path.join(root, name))

pprint("Deleting empty folders..")
pprint("Current path: " + os.getcwd())
for root, dirs, files in os.walk(path + "/" + name + "/Test Data/Checklists", topdown=False):
    if not dirs and not files:
        os.rmdir(root)
Use instead the function
os.removedirs(path)
It removes the given directory and then keeps removing parent directories until it reaches one that is not empty.
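A minimal sketch of how that could be combined with the file deletion from the question (root_dir is a placeholder for the Checklists path):
import os

root_dir = "/path/to/Test Data/Checklists"  # placeholder

for root, dirs, files in os.walk(root_dir, topdown=False):
    # delete everything that is not a PDF
    for fname in files:
        if not fname.endswith(".pdf"):
            os.remove(os.path.join(root, fname))
    # try to remove the now-possibly-empty directory and any empty parents
    try:
        os.removedirs(root)
    except OSError:
        pass  # the directory still has content (e.g. PDFs), so leave it alone
Note that os.removedirs keeps climbing upward, so it can also remove the top-level folder itself (and its parents) if they end up empty; use os.rmdir instead if that is not wanted.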
Ideally, you should remove the directories immediately after deleting the files, rather than doing two passes with os.walk:
import sys
import os

for dir, subdirs, files in os.walk(sys.argv[1], topdown=False):
    for name in files:
        if not name.endswith(".pdf"):
            os.remove(os.path.join(dir, name))
    # check whether the directory is now empty after deletions, and if so, remove it
    if len(os.listdir(dir)) == 0:
        os.rmdir(dir)
For deleting empty folders you can use this snippet.
It can be combined with file deletion, but as the last pass it should be used as is.
import os

def drop_empty_folders(directory):
    """Verify that every empty folder is removed from local storage."""
    for dirpath, dirnames, filenames in os.walk(directory, topdown=False):
        # note: a folder whose only contents were just-removed empty subfolders still
        # has entries in dirnames here, so it is not removed in the same pass
        if not dirnames and not filenames:
            os.rmdir(dirpath)
Remove all empty folders:
import os

folders = './A/'  # directory
for folder in list(os.walk(folders)):
    if not os.listdir(folder[0]):
        os.removedirs(folder[0])
I wrote this script to make M3U files for my music collection so I can open just one file and listen to a whole CD or whatever.
What my script does at the moment is put every song within the CWD and the underlying folders into one M3U file, which it places in the CWD.
But I also want to make an M3U file in every subfolder of the CWD.
So upon reaching a subfolder it should open a file named after that folder, place all the song names from that folder into it, and save the file as "foldername".m3u.
import os, sys

folder_name = os.path.basename(os.getcwd())
folder = os.getcwd()
ext3 = ['.mp3', '.Mp3']
file = open('%s.m3u' % (folder_name), 'w')
for root, dirs, files in os.walk(folder):
    for x in files:
        if x[-4:] in ext3:
            print(root + '\\' + x)
            file.write('%s\%s\n' % (root, x))
file.close()
if not x[-4:] in ext3:
    print("List is empty.")
I think this is what you're looking for. os.walk is actually recursive, so your code can be made to work just by opening a new .m3u file in the directory currently being walked over on every iteration of the outer for loop:
import os

exts = ('.mp3', '.Mp3')
for root, dirs, files in os.walk(os.getcwd()):
    m3uname = os.path.basename(root)
    with open("{}.m3u".format(os.path.join(root, m3uname)), 'w') as outfile:
        for f in files:
            if f.endswith(exts):
                outfile.write('{}\n'.format(os.path.join(root, f)))