os.walk with regex - python

I'd like to get a list of files that match a regex I have. I guess I should use os.walk, but how can I use it with a regex?
Thanks.

I'm not aware of anything in the stdlib implementing this, but it is not hard to code:
import os, os.path, re

def iter_matching(dirpath, regexp):
    """Generator yielding all files under `dirpath` whose absolute path
    matches the (compiled) regular expression `regexp`.
    Usage:
        >>> for filename in iter_matching('/', re.compile(r'/home.*\.bak')):
        ...     # do something
    """
    for dir_, dirnames, filenames in os.walk(dirpath):
        for filename in filenames:
            abspath = os.path.join(dir_, filename)
            if regexp.match(abspath):
                yield abspath
Or the more general:
import os, os.path, re

def filter_filenames(dirpath, predicate):
    """Usage:
        >>> for filename in filter_filenames('/', re.compile(r'/home.*\.bak').match):
        ...     # do something
    """
    for dir_, dirnames, filenames in os.walk(dirpath):
        for filename in filenames:
            abspath = os.path.join(dir_, filename)
            if predicate(abspath):
                yield abspath
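Because filter_filenames accepts any predicate, the same helper also works without regular expressions at all; a minimal usage sketch with a plain string predicate:
# collect all .bak files using str.endswith instead of a regex
for filename in filter_filenames('/', lambda p: p.endswith('.bak')):
    print(filename)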

If your regex can be translated into a shell expression such as foo/*.txt then you can use glob.
>>> import glob
>>> glob.glob('./[0-9].*')
['./1.gif', './2.txt']
>>> glob.glob('*.gif')
['1.gif', 'card.gif']
>>> glob.glob('?.gif')
['1.gif']

Wildcard for read_template [duplicate]

How can I find all the files in a directory having the extension .txt in Python?
You can use glob:
import glob, os
os.chdir("/mydir")
for file in glob.glob("*.txt"):
    print(file)
or simply os.listdir:
import os
for file in os.listdir("/mydir"):
    if file.endswith(".txt"):
        print(os.path.join("/mydir", file))
or, if you want to traverse directories recursively, use os.walk:
import os
for root, dirs, files in os.walk("/mydir"):
    for file in files:
        if file.endswith(".txt"):
            print(os.path.join(root, file))
Use glob.
>>> import glob
>>> glob.glob('./*.txt')
['./outline.txt', './pip-log.txt', './test.txt', './testingvim.txt']
Something like that should do the job:
for root, dirs, files in os.walk(directory):
    for file in files:
        if file.endswith('.txt'):
            print(file)
You can simply use pathlib's glob¹:
import pathlib
list(pathlib.Path('your_directory').glob('*.txt'))
or in a loop:
for txt_file in pathlib.Path('your_directory').glob('*.txt'):
    # do something with "txt_file"
If you want it recursive, you can use .glob('**/*.txt').
¹ The pathlib module was included in the standard library in Python 3.4, but back-ports of the module are available even for older Python versions (e.g. via conda or pip): pathlib and pathlib2.
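As an aside, pathlib also has Path.rglob, which is shorthand for the recursive '**/' pattern; a minimal sketch (the directory name is a placeholder):
import pathlib

# rglob('*.txt') behaves like glob('**/*.txt') and recurses into subdirectories
for txt_file in pathlib.Path('your_directory').rglob('*.txt'):
    print(txt_file)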
Something like this will work:
>>> import os
>>> path = '/usr/share/cups/charmaps'
>>> text_files = [f for f in os.listdir(path) if f.endswith('.txt')]
>>> text_files
['euc-cn.txt', 'euc-jp.txt', 'euc-kr.txt', 'euc-tw.txt', ... 'windows-950.txt']
import os
path = 'mypath/path'
files = os.listdir(path)
files_txt = [i for i in files if i.endswith('.txt')]
I like os.walk():
import os
for root, dirs, files in os.walk(dir):
    for f in files:
        if os.path.splitext(f)[1] == '.txt':
            fullpath = os.path.join(root, f)
            print(fullpath)
Or with generators:
import os
fileiter = (os.path.join(root, f)
            for root, _, files in os.walk(dir)
            for f in files)
txtfileiter = (f for f in fileiter if os.path.splitext(f)[1] == '.txt')
for txt in txtfileiter:
    print(txt)
Here are more versions of the same that produce slightly different results:
glob.iglob()
import glob
for f in glob.iglob("/mydir/*/*.txt"):  # generator, searches immediate subdirectories
    print(f)
glob.glob1()
print(glob.glob1("/mydir", "*.tx?"))  # literal_directory, basename_pattern
fnmatch.filter()
import fnmatch, os
print(fnmatch.filter(os.listdir("/mydir"), "*.tx?"))  # includes dot-files
Try this; it will find all your files recursively:
import glob, os
os.chdir("H:\\wallpaper")  # use whatever directory you want; note the double backslash, not a single one
for file in glob.glob("**/*.txt", recursive=True):
    print(file)
Python v3.5+
Fast method using os.scandir in a recursive function. Searches for all files with a specified extension in a folder and its sub-folders. It is fast, even for finding tens of thousands of files.
I have also included a function to convert the output to a Pandas DataFrame.
import os
import re
import pandas as pd

def findFilesInFolderYield(path, extension, containsTxt='', subFolders=True, excludeText=''):
    """ Recursive function to find all files of an extension type in a folder (and optionally in all subfolders too)

    path:        Base directory to find files in
    extension:   File extension to find, e.g. 'txt'. Regular expression, so 'ls\d' matches ls1, ls2, ls3 etc.
    containsTxt: List of strings; a file is only yielded if its path contains these. Ignored if '' (or blank)
    subFolders:  Bool. If True, find files in all subfolders under path. If False, only search the specified folder
    excludeText: Text string. Ignored if ''. A file is excluded if this string is in its path.
    """
    if type(containsTxt) == str:  # if a string and not in a list
        containsTxt = [containsTxt]

    myregexobj = re.compile(r'\.' + extension + '$')  # makes sure the file extension is at the end and is preceded by a .

    try:  # trapping an OSError or FileNotFoundError: a file-permissions problem, I believe
        for entry in os.scandir(path):
            if entry.is_file() and myregexobj.search(entry.path):
                bools = [True for txt in containsTxt if txt in entry.path and (excludeText == '' or excludeText not in entry.path)]
                if len(bools) == len(containsTxt):
                    yield entry.stat().st_size, entry.stat().st_atime_ns, entry.stat().st_mtime_ns, entry.stat().st_ctime_ns, entry.path
            elif entry.is_dir() and subFolders:  # if it's a directory, repeat the process as a nested call
                yield from findFilesInFolderYield(entry.path, extension, containsTxt, subFolders, excludeText)
    except OSError as ose:
        print('Cannot access ' + path + '. Probably a permissions error', ose)
    except FileNotFoundError as fnf:
        print(path + ' not found', fnf)
def findFilesInFolderYieldandGetDf(path, extension, containsTxt, subFolders=True, excludeText=''):
    """ Converts the tuples yielded by findFilesInFolderYield into a Pandas DataFrame.
    Recursive function to find all files of an extension type in a folder (and optionally in all subfolders too)

    path:        Base directory to find files in
    extension:   File extension to find, e.g. 'txt'. Regular expression, so 'ls\d' matches ls1, ls2, ls3 etc.
    containsTxt: List of strings; a file is only yielded if its path contains these. Ignored if '' (or blank)
    subFolders:  Bool. If True, find files in all subfolders under path. If False, only search the specified folder
    excludeText: Text string. Ignored if ''. A file is excluded if this string is in its path.
    """
    fileSizes, accessTimes, modificationTimes, creationTimes, paths = zip(*findFilesInFolderYield(path, extension, containsTxt, subFolders, excludeText))
    df = pd.DataFrame({
        'FLS_File_Size': fileSizes,
        'FLS_File_Access_Date': accessTimes,
        'FLS_File_Modification_Date': modificationTimes,
        'FLS_File_Creation_Date': creationTimes,
        'FLS_File_PathName': paths,
    })

    # the *_ns stat fields are nanoseconds since the epoch
    df['FLS_File_Modification_Date'] = pd.to_datetime(df['FLS_File_Modification_Date'], unit='ns')
    df['FLS_File_Creation_Date'] = pd.to_datetime(df['FLS_File_Creation_Date'], unit='ns')
    df['FLS_File_Access_Date'] = pd.to_datetime(df['FLS_File_Access_Date'], unit='ns')

    return df

ext = 'txt'  # regular expression
containsTxt = []
path = r'C:\myFolder'
df = findFilesInFolderYieldandGetDf(path, ext, containsTxt, subFolders=True)
path.py is another alternative: https://github.com/jaraco/path.py
from path import path
p = path('/path/to/the/directory')
for f in p.files(pattern='*.txt'):
    print(f)
To get all '.txt' file names inside the 'dataPath' folder as a list in a Pythonic way:
from os import listdir
from os.path import isfile, join
path = "/dataPath/"
onlyTxtFiles = [f for f in listdir(path) if isfile(join(path, f)) and f.endswith(".txt")]
print(onlyTxtFiles)
Python has all the tools to do this:
import os
the_dir = 'the_dir_that_want_to_search_in'
all_txt_files = list(filter(lambda x: x.endswith('.txt'), os.listdir(the_dir)))
I did a test (Python 3.6.4, W7x64) to see which solution is the fastest for one folder, no subdirectories, to get a list of complete file paths for files with a specific extension.
To make it short: for this task os.listdir() is the fastest, 1.7x as fast as the next best, os.walk() (with a break!), 2.7x as fast as pathlib, 3.2x as fast as os.scandir(), and 3.3x as fast as glob.
Please keep in mind that those results will change when you need recursive results. If you copy/paste one of the methods below, please add a .lower(), otherwise .EXT would not be found when searching for .ext (a sketch of this follows the results below).
import os
import pathlib
import timeit
import glob

def a():
    path = pathlib.Path().cwd()
    list_sqlite_files = [str(f) for f in path.glob("*.sqlite")]

def b():
    path = os.getcwd()
    list_sqlite_files = [f.path for f in os.scandir(path) if os.path.splitext(f)[1] == ".sqlite"]

def c():
    path = os.getcwd()
    list_sqlite_files = [os.path.join(path, f) for f in os.listdir(path) if f.endswith(".sqlite")]

def d():
    path = os.getcwd()
    os.chdir(path)
    list_sqlite_files = [os.path.join(path, f) for f in glob.glob("*.sqlite")]

def e():
    path = os.getcwd()
    list_sqlite_files = [os.path.join(path, f) for f in glob.glob1(str(path), "*.sqlite")]

def f():
    path = os.getcwd()
    list_sqlite_files = []
    for root, dirs, files in os.walk(path):
        for file in files:
            if file.endswith(".sqlite"):
                list_sqlite_files.append(os.path.join(root, file))
        break

print(timeit.timeit(a, number=1000))
print(timeit.timeit(b, number=1000))
print(timeit.timeit(c, number=1000))
print(timeit.timeit(d, number=1000))
print(timeit.timeit(e, number=1000))
print(timeit.timeit(f, number=1000))
Results:
# Python 3.6.4
0.431
0.515
0.161
0.548
0.537
0.274
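To illustrate the .lower() note from above, here is a case-insensitive variant of the os.listdir() method, as a sketch (the extension matches the benchmark's):
import os

path = os.getcwd()
# comparing the lower-cased name means ".SQLITE" and ".sqlite" both match
list_sqlite_files = [os.path.join(path, f) for f in os.listdir(path)
                     if f.lower().endswith(".sqlite")]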
import os
import sys

if len(sys.argv) < 3:
    print('no params')
    sys.exit(1)

dir = sys.argv[1]
mask = sys.argv[2]
files = os.listdir(dir)
res = filter(lambda x: x.endswith(mask), files)
print(list(res))
To get an array of ".txt" file names from a folder called "data" in the same directory I usually use this simple line of code:
import os
fileNames = [fileName for fileName in os.listdir("data") if fileName.endswith(".txt")]
This code makes my life simpler.
import os
fnames = [file for root, dirs, files in os.walk(dir)
          for file in files
          if file.endswith('.txt')]  # or file.endswith('.png'), file.endswith('.pdf'), ...
for fname in fnames:
    print(fname)
Use fnmatch: https://docs.python.org/2/library/fnmatch.html
import fnmatch
import os
for file in os.listdir('.'):
    if fnmatch.fnmatch(file, '*.txt'):
        print(file)
A copy-pastable solution similar to the one of ghostdog:
def get_all_filepaths(root_path, ext):
    """
    Search all files which have a given extension within root_path.

    This ignores the case of the extension and searches subdirectories, too.

    Parameters
    ----------
    root_path : str
    ext : str

    Returns
    -------
    list of str

    Examples
    --------
    >>> get_all_filepaths('/run', '.lock')
    ['/run/unattended-upgrades.lock',
     '/run/mlocate.daily.lock',
     '/run/xtables.lock',
     '/run/mysqld/mysqld.sock.lock',
     '/run/postgresql/.s.PGSQL.5432.lock',
     '/run/network/.ifstate.lock',
     '/run/lock/asound.state.lock']
    """
    import os
    all_files = []
    for root, dirs, files in os.walk(root_path):
        for filename in files:
            if filename.lower().endswith(ext):
                all_files.append(os.path.join(root, filename))
    return all_files
You can also use yield to create a generator and thus avoid assembling the complete list:
def get_all_filepaths(root_path, ext):
    import os
    for root, dirs, files in os.walk(root_path):
        for filename in files:
            if filename.lower().endswith(ext):
                yield os.path.join(root, filename)
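Usage might look like this (a minimal sketch; directory and extension are placeholders):
for filepath in get_all_filepaths('/run', '.lock'):
    print(filepath)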
I suggest you use fnmatch and the upper method. In this way you can find any of the following:
Name.txt
Name.TXT
Name.Txt
import fnmatch
import os
for file in os.listdir("/Users/Johnny/Desktop/MyTXTfolder"):
    if fnmatch.fnmatch(file.upper(), '*.TXT'):
        print(file)
Here's one with extend(); glob and os need to be imported, and path (the directory to search) is a placeholder here:
import glob, os
path = '.'  # placeholder: directory to search in
types = ('*.jpg', '*.png')
images_list = []
for files in types:
    images_list.extend(glob.glob(os.path.join(path, files)))
Functional solution with sub-directories:
from fnmatch import filter
from functools import partial
from itertools import chain
from os import path, walk

print(*chain(*(map(partial(path.join, root), filter(filenames, "*.txt"))
               for root, _, filenames in walk("mydir"))))
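The same logic unrolled into plain loops, as a readability sketch with equivalent behavior:
import fnmatch
import os

matches = []
for root, _, filenames in os.walk("mydir"):
    # keep only the *.txt entries of this directory, joined onto its root
    matches.extend(os.path.join(root, name)
                   for name in fnmatch.filter(filenames, "*.txt"))
print(*matches)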
In case the folder contains a lot of files or memory is a constraint, consider using generators:
import os

def yield_files_with_extensions(folder_path, file_extension):
    for _, _, files in os.walk(folder_path):
        for file in files:
            if file.endswith(file_extension):
                yield file
Option A: Iterate
for f in yield_files_with_extensions('.', '.txt'):
    print(f)
Option B: Get all
files = list(yield_files_with_extensions('.', '.txt'))
Use the Python os module to find files with a specific extension.
A simple example is here:
import os

# This is the path where you want to search
path = r'd:'
# this is the extension you want to detect
extension = '.txt'  # this can be: .jpg .png .xls .log ...

for root, dirs_list, files_list in os.walk(path):
    for file_name in files_list:
        if os.path.splitext(file_name)[-1] == extension:
            file_name_path = os.path.join(root, file_name)
            print(file_name)
            print(file_name_path)  # this is the full path of the filtered file
Many users have replied with os.walk answers, which include not only all files but also all directories and subdirectories and their files.
import os

def files_in_dir(path, extension=''):
    """
    Generator: yields all of the files in <path> ending with
    <extension>

    \param path       Absolute or relative path to inspect,
    \param extension  [optional] Only yield files matching this,

    \yield            [filenames]
    """
    for _, dirs, files in os.walk(path):
        dirs[:] = []  # do not recurse into directories
        yield from [f for f in files if f.endswith(extension)]

# Example: print all the .py files in './python'
for filename in files_in_dir('./python', '.py'):
    print("-", filename)
Or for a one-off where you don't need a generator:
path, ext = "./python", ".py"
for _, _, dirfiles in os.walk(path):
    matches = (f for f in dirfiles if f.endswith(ext))
    break
for filename in matches:
    print("-", filename)
If you are going to use matches for something else, you may want to make it a list rather than a generator expression:
matches = [f for f in dirfiles if f.endswith(ext)]

separate filename after underscore _ os.path

In my path /volume1/xx/ there are several files named like A_test1.pdf, B_test2.pdf, ... I want to separate out the test1 part, without the path and without .pdf.
I'm a newbie, so I first tried it with the full name,
but I got only "*.pdf" as text.
What is wrong with the path or the placeholder *?
splitname = os.path.basename('/volume1/xx/*.pdf')
Edit
I got 2019-01-18_RG-Telekom[] from the original ReT_march, but I want 2019-01-18_RG-Telekom_march (the text after the underscore). xx is a folder.
here is the whole code:
#!/usr/bin/env python3
import datetime
import glob
import os
import os.path

SOURCE_PATH = '/volume1/xx'
TARGET_PATH = os.path.join(SOURCE_PATH, 'DMS')

def main():
    today = datetime.date.today()
    splitnames = [os.path.basename(fpath) for fpath in glob.glob("./xx/*.pdf")]
    for prefix, name_part in [
        ('ReA', 'RG-Amazon'),
        ('GsA', 'GS-Amazon'),
        ('ReT', 'RG-Telekom'),
        ('NoE', 'Notiz-EDV'),
    ]:
        filenames = glob.iglob(os.path.join(SOURCE_PATH, prefix + '*.pdf'))
        for old_filename in filenames:
            new_filename = os.path.join(TARGET_PATH, '{}_{}_{}.pdf'.format(today, name_part, splitnames))
            os.rename(old_filename, new_filename)

if __name__ == '__main__':
    main()
Use glob; os.path doesn't know how to process wildcard masks, but glob.glob does:
splitnames = [os.path.basename(fpath) for fpath in glob.glob("./**/*.pdf")]
splitnames
Out:
['A_test1.pdf', 'B_test2.pdf']
Output of the glob:
glob.glob("./**/*.pdf")
Out:
['./some_folder/A_test1.pdf', './another_folder/B_test2.pdf']
Apply os.path.basename to this list to extract the basenames, as shown above.
Edit
If xx in the path volume1/xx/ is just a folder name, not a mask, you should use the following expression:
splitnames = [os.path.basename(fpath) for fpath in glob.glob("./xx/*.pdf")]
because ./**/ is an expression which masks a folder name, and it's unnecessary in that case.
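To then get just the part after the underscore, without the path and the .pdf extension, a small sketch building on those basenames (the file name is illustrative):
import os.path

basename = 'A_test1.pdf'
stem, _ = os.path.splitext(basename)      # 'A_test1'
_, after_underscore = stem.split('_', 1)  # 'test1'
print(after_underscore)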

select multiple files using glob in python

I have to select all the files in a directory which have y2001, y2002, or y2003 in the middle of the filename, all at once. How can I do that?
import glob
files = glob.glob('*y2001*.jpg')
You can do it with
import glob
files = glob.glob('*y200[123]*.jpg')
for further reference see http://docs.python.org/2/library/glob.html
Here is an overkill method for solving your problem.
import os
import re
import functools

def validate_file(validators, file_path):
    return any(re.search(validator, file_path) for validator in validators)

def get_matching_files_in_dir(directory, validator, append_dir=True):
    for file_path in os.listdir(directory):
        if validator(file_path):
            yield os.path.join(directory, file_path) if append_dir else file_path

# define your needs:
matching_patterns = ['y2001', 'y2002', 'y2003']
validator = functools.partial(validate_file, matching_patterns)

# usage
list(get_matching_files_in_dir('YOUR DIR', validator))
An Example:
>>> matching_patterns = ['README']
>>> validator = functools.partial(validate_file, matching_patterns)
>>> print(list(get_matching_files_in_dir('C:\\python27', validator)))
['C:\\python27\\README.txt']

Recursively append files to zip archive in python

In Python 2.7.4 on Windows, if I have a directory structure that follows:
test/foo/a.bak
test/foo/b.bak
test/foo/bar/c.bak
test/d.bak
And I use the following to add them to an existing archive such that 'd.bak' is at the root of the archive:
import zipfile
import os.path
import fnmatch

def find_files(directory, pattern):
    for root, dirs, files in os.walk(directory):
        for basename in files:
            if fnmatch.fnmatch(basename, pattern):
                filename = os.path.join(root, basename)
                yield filename

if __name__ == '__main__':
    z = zipfile.ZipFile("testarch.zip", "a", zipfile.ZIP_DEFLATED)
    for filename in find_files('test', '*.*'):
        print 'Found file:', filename
        z.write(filename, os.path.basename(filename), zipfile.ZIP_DEFLATED)
    z.close()
The directory structure inside the zip file is flat. It creates the foo/ directory only if a sub-directory exists inside it (if I exclude test/foo/bar/c.bak, it does not create the directory; if that file is included, foo/ is created but not foo/bar/, if that makes sense), but no sub-directories or their files:
foo/
a.bak
b.bak
c.bak
d.bak
Am I missing something?
The problem is that you're explicitly asking it to flatten all the paths:
z.write(filename, os.path.basename(filename), zipfile.ZIP_DEFLATED)
If you look at the docs, the default arcname is:
the same as filename, but without a drive letter and with leading path separators removed
But you're overriding that with os.path.basename(filename). (If you don't know what basename does, it returns "the last pathname component". If you don't want just the last pathname component, don't call basename.)
If you just do z.write('test/foo/bar/c.bak'), it will create a zip entry named test/foo/bar/c.bak, but if you do z.write('test/foo/bar/c.bak', 'c.bak'), it will create a zip entry named c.bak. Since you do that for all of the entries, the whole thing ends up flattened.
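As an aside (not part of this answer's code), os.path.relpath is a common way to keep the tree while dropping only the top-level directory; a sketch using the question's find_files:
import os.path
import zipfile

z = zipfile.ZipFile("testarch.zip", "a", zipfile.ZIP_DEFLATED)
for filename in find_files('test', '*.*'):
    # 'test/foo/a.bak' -> 'foo/a.bak': keeps sub-directories, drops 'test/'
    arcname = os.path.relpath(filename, 'test')
    z.write(filename, arcname, zipfile.ZIP_DEFLATED)
z.close()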
I figured it out. As abarnet pointed out, I had misread the docs on zipfiles. Using the following function, I can create the correct archive name for the zip file:
def createArchName(path):
    line = path
    if "\\" in line:
        ''' windows '''
        discard, val = line.split("\\", 1)
        return val
    else:
        ''' unix '''
        discard, val = line.split("/", 1)
        return val
For those interested, the full code is as follows:
import zipfile
import os.path
import fnmatch

def find_files(directory, pattern):
    for root, dirs, files in os.walk(directory):
        for basename in files:
            if fnmatch.fnmatch(basename, pattern):
                filename = os.path.join(root, basename)
                yield filename

def createArchName(path):
    line = path
    if "\\" in line:
        ''' windows '''
        discard, val = line.split("\\", 1)
        return val
    else:
        ''' unix '''
        discard, val = line.split("/", 1)
        return val

if __name__ == '__main__':
    if not os.path.exists("test"):
        os.mkdir("test")
    z = zipfile.ZipFile("testarch.zip", "a", zipfile.ZIP_DEFLATED)
    for filename in find_files('test', '*.*'):
        archname = createArchName(filename)
        print 'Found file:', archname
        z.write(filename, archname, zipfile.ZIP_DEFLATED)
    z.close()

Use fnmatch.filter to filter files by more than one possible file extension

Given the following piece of python code:
for root, dirs, files in os.walk(directory):
    for filename in fnmatch.filter(files, '*.png'):
        pass
How can I filter for more than one extension? In this special case I want to get all files ending with *.png, *.gif, *.jpg or *.jpeg.
For now I came up with:
for root, dirs, files in os.walk(directory):
    for extension in ['jpg', 'jpeg', 'gif', 'png']:
        for filename in fnmatch.filter(files, '*.' + extension):
            pass
But I don't think it is very elegant or performant.
Does someone have a better idea?
If you only need to check extensions (i.e. no further wildcards), why don't you simply use basic string operations?
for root, dirs, files in os.walk(directory):
    for filename in files:
        if filename.endswith(('.jpg', '.jpeg', '.gif', '.png')):
            pass
I think your code is actually fine. If you want to touch every filename only once, define your own filtering function:
def is_image_file(filename, extensions=['.jpg', '.jpeg', '.gif', '.png']):
    return any(filename.endswith(e) for e in extensions)

for root, dirs, files in os.walk(directory):
    for filename in filter(is_image_file, files):
        pass
I've been using this with a lot of success.
import fnmatch
import functools
import itertools
import os

# Remove the annotations if you're not on Python 3
def find_files(dir_path: str=None, patterns: [str]=None) -> [str]:
    """
    Returns a generator yielding files matching the given patterns
    :type dir_path: str
    :type patterns: [str]
    :rtype : [str]
    :param dir_path: Directory to search for files/directories under. Defaults to current dir.
    :param patterns: Patterns of files to search for. Defaults to ["*"]. Example: ["*.json", "*.xml"]
    """
    path = dir_path or "."
    path_patterns = patterns or ["*"]
    for root_dir, dir_names, file_names in os.walk(path):
        filter_partial = functools.partial(fnmatch.filter, file_names)
        for file_name in itertools.chain(*map(filter_partial, path_patterns)):
            yield os.path.join(root_dir, file_name)
Examples:
for f in find_files(test_directory):
    print(f)
yields:
.\test.json
.\test.xml
.\test.ini
.\test_helpers.py
.\__init__.py
Testing with multiple patterns:
for f in find_files(test_directory, ["*.xml", "*.json", "*.ini"]):
    print(f)
yields:
.\test.json
.\test.xml
.\test.ini
This would perhaps be a better way, because you are not calling + repeatedly and you are using a tuple instead of a list.
for root, dirs, files in os.walk(directory):
    for extension in ('*.jpg', '*.jpeg', '*.gif', '*.png'):
        for filename in fnmatch.filter(files, extension):
            pass
A tuple is better because you are not going to modify the extensions once you have created them; you only use them to iterate over.
This isn't really elegant either, but it works:
for root, dirs, files in os.walk(directory):
    for filename in fnmatch.filter(files, '*.png') + fnmatch.filter(files, '*.jpg') + fnmatch.filter(files, '*.jpeg') + fnmatch.filter(files, '*.gif'):
        pass
Here is what I am using to filter files in Apache log directories.
Here I exclude error files:
rep_filters = [now.strftime("%Y%m%d")]

def files_filter(liste_fic, filters=rep_filters):
    s = "(fic for fic in liste_fic if fic.find('error') < 0"
    for filter in filters:
        s += " and fic.find('%s') >= 0 " % filter
    s += ")"
    return eval(s)
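Building the generator expression as a string and eval'ing it is risky and hard to debug; the same filter can be written directly, as a sketch with the same inputs:
def files_filter(liste_fic, filters=rep_filters):
    # keep entries that do not contain 'error' and contain every filter string
    return (fic for fic in liste_fic
            if 'error' not in fic and all(f in fic for f in filters))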
Please try this:
import fnmatch

# pattern_list = ['*.jpg', '__.*']
def checkFilepatter(filename, pattern_list):
    for pattern in pattern_list:
        if fnmatch.fnmatch(filename, pattern):
            return True
    return False
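Usage might look like this (a sketch; the file name and patterns are placeholders):
if checkFilepatter('image_01.jpg', ['*.jpg', '*.png']):
    print('We have a match!')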
You can use a list comprehension to check if my_file matches any of the file masks defined in patterns:
import fnmatch
my_file = 'my_precious.txt'
patterns = ('*.txt', '*.html', '*.mp3')
if [pat for pat in patterns if fnmatch.fnmatch(my_file, pat)]:
    print('We have a match!')
else:
    print('No match')
Internally, fnmatch uses regular expressions, and there's a function that makes a regex from an fnmatch pattern: fnmatch.translate. This may also give a little speed-up.
import fnmatch
import os
import re

image_exts = ['jpg', 'jpeg', 'gif', 'png']
image_re = re.compile('|'.join(fnmatch.translate('*.' + e) for e in image_exts))

for root, dirs, files in os.walk(directory):
    for filename in files:
        if image_re.match(filename):
            ...
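If case-insensitive matching is wanted too (an aside, not from the original answer), the combined pattern can be compiled with re.IGNORECASE:
# '.PNG' and '.png' now both match
image_re = re.compile('|'.join(fnmatch.translate('*.' + e) for e in image_exts),
                      re.IGNORECASE)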
The clearest solution is:
import os
for root, dirs, files in os.walk(directory):
    for filename in files:
        _, ext = os.path.splitext(filename)
        if ext in ['.jpg', '.jpeg', '.gif', '.png']:
            ...
or, using pathlib:
import pathlib
for path in pathlib.Path(directory).glob('**/*'):
    if path.suffix in ['.jpg', '.jpeg', '.gif', '.png']:
        ...
