How do I remove '' from a list? - python

I am trying to remove '' from the list, and it gives me IndexError: list index out of range.
I have tried new_string.remove('') as well.
def func_name(new_string):
    for k in range(len(new_string)):
        if new_string[k] == '':
            new_string = new_string[:k] + new_string[k+1:]
    return new_string

print func_name(['Title','','This','is','a','link','.'])
print func_name(['','','Hello','World!','',''])
print func_name(['',''])

You should try a more pythonic way, like this:
def func_name(new_string):
    return [s for s in new_string if s != '']

print func_name(['Title', '', 'This', 'is', 'a', 'link', '.'])
print func_name(['', '', 'Hello', 'World!', '', ''])
print func_name(['', ''])

Your problem is that the list is shortened, but the indices are still iterated as if it's the original list.
There are better ways of implementing this (see other answers), but to fix your implementation, you can iterate in reverse order thus avoiding the "overflow":
def remove_empty(s):
    for k in reversed(range(len(s))):
        if s[k] == '':
            del s[k]
This only works in cases where you remove at most one element at each iteration.
Note that this mutates s in place.
>>> a = ['Title','','This','is','a','link','.']
>>> remove_empty(a)
>>> a
['Title','This','is','a','link','.']
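For comparison, here is a sketch of the failure mode of the original forward loop, using the shortest failing input from the question (traceback abbreviated):
>>> func_name(['', ''])   # range(2) is computed up front; after one removal, index 1 no longer exists
Traceback (most recent call last):
  ...
IndexError: list index out of range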

Check this example:
x = ["don't", '', 13]
while '' in x:
    x.remove('')
assert x == ["don't", 13]

Related

Removing specific set of characters in a list of strings

I have a list of strings and another list of "bad" strings; I want to remove every occurrence of the bad strings from the strings in my list, so the output of the code below would be foo, bar, foobar, foofoo... Currently I have tried a few things, for example:
mylist = ['foo!', 'bar\\n', 'foobar!!??!!', 'foofoo::!*']
remove_list = ['\\n', '!', '*', '?', ':']
for remove in remove_list:
    for strings in mylist:
        strings = strings.replace(remove, ' ')
The above code doesn't work. At one point I set the result to a new variable and appended to it afterwards, but that wasn't working well because if there were two issues in a string it would be appended twice.
You changed the temporary variable, not the original list. Instead, assign the result back into mylist
for bad in remove_list:
    for pos, string in enumerate(mylist):
        mylist[pos] = string.replace(bad, ' ')
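Equivalently, each pass can rebuild the list with a comprehension; a minimal sketch of the same idea:
for bad in remove_list:
    mylist = [string.replace(bad, ' ') for string in mylist]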
Try this:
mylist = ['foo!', 'bar\\n', 'foobar!!??!!', 'foofoo::!*']
bads = ['\\n', '!', '*', '?', ':']
result = []
for s in mylist:
    # s is a temporary copy
    for bad in bads:
        s = s.replace(bad, '')  # for each bad substring, remove it
    result.append(s)
print(result)
This could be implemented more concisely, but this way it's easier to understand.
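For reference, a more compact sketch of the same loop using functools.reduce (same assumed inputs as above):
from functools import reduce

mylist = ['foo!', 'bar\\n', 'foobar!!??!!', 'foofoo::!*']
bads = ['\\n', '!', '*', '?', ':']
result = [reduce(lambda acc, bad: acc.replace(bad, ''), bads, s) for s in mylist]
print(result)  # ['foo', 'bar', 'foobar', 'foofoo']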
I had a hard time interpreting the question, but I see you have the desired result at the top of your question.
mylist = ['foo!', 'bar\\n', 'foobar!!??!!', 'foofoo::!*']
remove_list = ['\\n', '!', '*', '?', ':']
output = []
for strings in mylist:
    for remove in remove_list:
        strings = strings.replace(remove, '')
    output.append(strings)
import re
# 'regex' was not defined in the original answer; assuming a pattern that strips non-letter characters
regex = re.compile(r'[^a-zA-Z]')
for list1 in mylist:
    t = regex.sub('', list1)
    print(t)
If you just want to get rid of non-letter characters, do this. It works a lot better than comparing two separate lists.
Why not have regex do the work for you? No nested loops this way (just make sure to escape correctly):
import re
mylist = ['foo!', 'bar\\n', 'foobar!!??!!', 'foofoo::!*']
remove_list = [r'\\n', '\!', '\*', '\?', ':']
removals = re.compile('|'.join(remove_list))
print([removals.sub('', s) for s in mylist])
['foo', 'bar', 'foobar', 'foofoo']
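If you'd rather not escape the metacharacters by hand, re.escape can build the same pattern for you (a sketch, same assumed inputs):
import re

mylist = ['foo!', 'bar\\n', 'foobar!!??!!', 'foofoo::!*']
remove_list = ['\\n', '!', '*', '?', ':']
removals = re.compile('|'.join(re.escape(r) for r in remove_list))
print([removals.sub('', s) for s in mylist])  # ['foo', 'bar', 'foobar', 'foofoo']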
Another solution you can use is a list comprehension that removes the characters you want. After that, you delete duplicates.
list_good = [word.replace(bad, '') for word in mylist for bad in remove_list]
list_good = list(set(list_good))
my_list = ["foo!", "bar\\n", "foobar!!??!!", "foofoo::*!"]
to_remove = ["!", "\\n", "?", ":", "*"]
for index, item in enumerate(my_list):
    for char in to_remove:
        if char in item:
            item = item.replace(char, "")
    my_list[index] = item
print(my_list)  # outputs ["foo", "bar", "foobar", "foofoo"]

Python List Comprehensions - Comparing with Dict

I have written code that has str data:
def characters(self, content):
    self.contentText = content.split()
    # self.contentText is a list here
I am sending self.contentText list to another module as:
self.contentText = Formatter.formatter(self.contentText)
In this method, I am writing the code below:
remArticles = {' a ': '', ' the ': '', ' and ': '', ' an ': '', '&nbsp;': ''}
contentText = [i for i in contentText if i not in remArticles.keys()]
But it is not replacing. Should remArticles be a list and not a dict?
I tried replacing it with a list too, but it simply wouldn't replace.
Of course, with a list the code would be:
contentText = [i for i in contentText if i not in remArticles]
This is continuation from Accessing Python List Type
Initially I was trying:
for i in remArticles:
    print type(contentText)
    print "1"
    contentText = contentText.replace(i, remArticles[i])
    print type(contentText)
But that threw errors:
contentText = contentText.replace(i, remArticles[i])
AttributeError: 'list' object has no attribute 'replace'
Your question is not clear but if your goal is to convert a string to a list, remove unwanted words, and then turn the list back into a string, then you can do it like this:
def clean_string(s):
    words_to_remove = ['a', 'the', 'and', 'an', ' ']
    list_of_words = s.split()
    cleaned_list = [word for word in list_of_words if word not in words_to_remove]
    new_string = ' '.join(cleaned_list)
    return new_string
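For instance, with an assumed sample sentence (not from the question):
print(clean_string('a fox and an owl'))  # 'fox owl'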
This is how you could do the same without converting to a list:
def clean_string(s):
    words_to_remove = ['a', 'the', 'and', 'an', ' ']
    for word in words_to_remove:
        s = s.replace(word, '')
    return s
And if you wanted more flexibility in removing some words but replacing others, you could do the following with a dictionary:
def clean_string(s):
    words_to_replace = {'a': '', 'the': '', 'and': '&', 'an': '', ' ': ' '}
    for old, new in words_to_replace.items():
        s = s.replace(old, new)
    return s
Your problem is that your map contains spaces within the keys. The following code solves it:
[i for i in contentText if i not in map(lambda x: x.strip(), remArticles.keys())]
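A small variation on the same idea: strip the keys once up front instead of rebuilding the stripped list for every word (a sketch):
stripped_keys = {key.strip() for key in remArticles}
contentText = [word for word in contentText if word not in stripped_keys]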

How to prevent a procedure similar to the split() function (but with multiple separators) from returning '' in its output

Guys, I'm a programming newbie trying to improve the procedure below so that when I pass it this argument: split_string("After the flood ... all the colors came out.", " .") it returns this:
['After', 'the', 'flood', 'all', 'the', 'colors', 'came', 'out']
and not this:
['After', 'the', 'flood', '', '', '', '', 'all', 'the', 'colors', 'came', 'out', '']
Any hint on how to do this? (I could just iterate over the list again and delete the '' elements, but I wanted a more elegant solution.)
This is the procedure:
def split_string(source, separatorList):
    splited = [source]
    for separator in separatorList:
        source = splited
        splited = []
        print 'separator= ', separator
        for sequence in source:
            print 'sequence = ', sequence
            if sequence not in separatorList and sequence != ' ':
                splited = splited + sequence.split(separator)
    return splited
print split_string("This is a test-of the,string separation-code!", " ,!-")
print
print split_string("After the flood ... all the colors came out."," .")
You can filter out the empty strings in the return statement:
return [x for x in splited if x]
As a side note, I think it would be easier to write your function based on re.split():
import re

def split_string(s, separators):
    pattern = "|".join(re.escape(sep) for sep in separators)
    return [x for x in re.split(pattern, s) if x]
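For the input from the question, this should print the desired list:
print split_string("After the flood ... all the colors came out.", " .")
# ['After', 'the', 'flood', 'all', 'the', 'colors', 'came', 'out']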
print re.split('[. ]+', 'After the flood ... all the colors came out.')
or, better, the other way round
print re.findall('[^. ]+', 'After the flood ... all the colors came out.')
Let's first see where the empty strings come from; try executing this in a shell:
>>> 'After the'.split(' ')
result:
['After', '', 'the']
This is because when the split method comes to ' ' in the string, it finds nothing but '' between the two spaces.
So the solution is simple: just check the boolean value of every item you get from .split():
def split_string(source, separatorList):
    splited = [source]
    for separator in separatorList:
        # if you want to exchange two variables, writing it in one line makes the code clearer
        source, splited = splited, []
        for sequence in source:
            # there's no need to check `sequence` in advance, just split it
            # if sequence not in separatorList and sequence != ' ':
            #     splited = splited + sequence.split(separator)
            # the code that prevents '' from appearing is here: an if check in the list comprehension
            # `+=` is equivalent to `splited = splited +`
            splited += [i for i in sequence.split(separator) if i]
    return splited
For more details about [i for i in a_list if i], see PEP 202.

In Python, how do I split a string and keep the separators?

Here's the simplest way to explain this. Here's what I'm using:
re.split('\W', 'foo/bar spam\neggs')
>>> ['foo', 'bar', 'spam', 'eggs']
Here's what I want:
someMethod('\W', 'foo/bar spam\neggs')
>>> ['foo', '/', 'bar', ' ', 'spam', '\n', 'eggs']
The reason is that I want to split a string into tokens, manipulate it, then put it back together again.
The docs of re.split mention:
Split string by the occurrences of pattern. If capturing parentheses are used in pattern, then the text of all groups in the pattern are also returned as part of the resulting list.
So you just need to wrap your separator with a capturing group:
>>> re.split('(\W)', 'foo/bar spam\neggs')
['foo', '/', 'bar', ' ', 'spam', '\n', 'eggs']
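Since the delimiters are kept, the round trip the question asks about is straightforward; a sketch that manipulates the word tokens and reassembles the string:
import re

tokens = re.split(r'(\W)', 'foo/bar spam\neggs')
rebuilt = ''.join(t.upper() if t.isalnum() else t for t in tokens)
# rebuilt == 'FOO/BAR SPAM\nEGGS'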
If you are splitting on newline, use splitlines(True).
>>> 'line 1\nline 2\nline without newline'.splitlines(True)
['line 1\n', 'line 2\n', 'line without newline']
(Not a general solution, but adding this here in case someone comes here not realizing this method existed.)
Another example: split on non-alphanumeric characters and keep the separators.
import re
a = "foo,bar#candy*ice%cream"
re.split('([^a-zA-Z0-9])',a)
output:
['foo', ',', 'bar', '#', 'candy', '*', 'ice', '%', 'cream']
Explanation:
re.split('([^a-zA-Z0-9])', a)
()          <- keep the separators
[]          <- match any single character from the set
^a-zA-Z0-9  <- negated set: anything except letters (upper/lower case) and digits
If you have only 1 separator, you can employ list comprehensions:
text = 'foo,bar,baz,qux'
sep = ','
Appending/prepending separator:
result = [x+sep for x in text.split(sep)]
#['foo,', 'bar,', 'baz,', 'qux,']
# to get rid of trailing
result[-1] = result[-1].strip(sep)
#['foo,', 'bar,', 'baz,', 'qux']
result = [sep+x for x in text.split(sep)]
#[',foo', ',bar', ',baz', ',qux']
# to get rid of the leading separator
result[0] = result[0].strip(sep)
#['foo', ',bar', ',baz', ',qux']
Separator as its own element:
result = [u for x in text.split(sep) for u in (x, sep)]
#['foo', ',', 'bar', ',', 'baz', ',', 'qux', ',']
result = result[:-1]  # to get rid of the trailing separator
Another no-regex solution that works well on Python 3
# Split strings and keep separator
test_strings = ['<Hello>', 'Hi', '<Hi> <Planet>', '<', '']

def split_and_keep(s, sep):
    if not s: return ['']  # consistent with string.split()
    # Find a replacement character that is not used in the string,
    # i.e. just use the highest character present plus one
    # Note: this fails if ord(max(s)) == 0x10FFFF (ValueError)
    p = chr(ord(max(s)) + 1)
    return s.replace(sep, sep + p).split(p)

for s in test_strings:
    print(split_and_keep(s, '<'))

# If the unicode limit is reached it will fail explicitly
unicode_max_char = chr(1114111)
ridiculous_string = '<Hello>' + unicode_max_char + '<World>'
print(split_and_keep(ridiculous_string, '<'))
One lazy and simple solution:
Assume your regex pattern is split_pattern = r'(!|\?)'.
First, add a marker such as '[cut]' after each separator:
new_string = re.sub(split_pattern, '\\1[cut]', your_string)
Then split on the marker: new_string.split('[cut]')
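Putting the two steps together (your_string here is just an assumed sample):
import re

split_pattern = r'(!|\?)'
your_string = 'Hello! How are you? Fine.'
new_string = re.sub(split_pattern, '\\1[cut]', your_string)
print(new_string.split('[cut]'))  # ['Hello!', ' How are you?', ' Fine.']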
You can also split a string with an array of strings instead of a regular expression, like this:
def tokenizeString(aString, separators):
    # separators is an array of strings that are used to split the string
    # sort separators by ascending length, so the longest matching separator wins below
    separators.sort(key=len)
    listToReturn = []
    i = 0
    while i < len(aString):
        theSeparator = ""
        for current in separators:
            if current == aString[i:i+len(current)]:
                theSeparator = current
        if theSeparator != "":
            listToReturn += [theSeparator]
            i = i + len(theSeparator)
        else:
            if listToReturn == []:
                listToReturn = [""]
            if listToReturn[-1] in separators:
                listToReturn += [""]
            listToReturn[-1] += aString[i]
            i += 1
    return listToReturn
print(tokenizeString(aString = "\"\"\"hi\"\"\" hello + world += (1*2+3/5) '''hi'''", separators = ["'''", '+=', '+', "/", "*", "\\'", '\\"', "-=", "-", " ", '"""', "(", ")"]))
Here is a simple .split solution that works without regex.
This is an answer for Python split() without removing the delimiter, so not exactly what the original post asks but the other question was closed as a duplicate for this one.
def splitkeep(s, delimiter):
    split = s.split(delimiter)
    return [substr + delimiter for substr in split[:-1]] + [split[-1]]
Random tests:
import random

CHARS = [".", "a", "b", "c"]
assert splitkeep("", "X") == [""]  # 0-length test
for delimiter in ('.', '..'):
    for _ in range(100000):
        length = random.randint(1, 50)
        s = "".join(random.choice(CHARS) for _ in range(length))
        assert "".join(splitkeep(s, delimiter)) == s
# This keeps all separators in the result
##########################################################################
import re

st = "%%(c+dd+e+f-1523)%%7"
sh = re.compile(r'[\+\-//\*\<\>\%\(\)]')

def splitStringFull(sh, st):
    ls = sh.split(st)
    lo = []
    start = 0
    for l in ls:
        if not l: continue
        k = st.find(l)
        llen = len(l)
        if k > start:
            tmp = st[start:k]
            lo.append(tmp)
            lo.append(l)
            start = k + llen
        else:
            lo.append(l)
            start = llen
    return lo
#############################
li = splitStringFull(sh, st)
# ['%%(', 'c', '+', 'dd', '+', 'e', '+', 'f', '-', '1523', ')%%', '7']
Replace every separator match (\W) with the match plus a new separator (\W;), then split on the new separator (;):
import re

def split_and_keep(separator, s):
    return re.split(';', re.sub(separator, lambda match: match.group() + ';', s))

print(split_and_keep('\W', 'foo/bar spam\neggs'))
If one wants to split a string while keeping the separators by regex without a capturing group:
import re

def finditer_with_separators(regex, s):
    matches = []
    prev_end = 0
    for match in regex.finditer(s):
        match_start = match.start()
        if (prev_end != 0 or match_start > 0) and match_start != prev_end:
            matches.append(s[prev_end:match.start()])
        matches.append(match.group())
        prev_end = match.end()
    if prev_end < len(s):
        matches.append(s[prev_end:])
    return matches

regex = re.compile(r"[\(\)]")
matches = finditer_with_separators(regex, s)  # s is your input string
If one assumes that the regex is wrapped in a capturing group:
def split_with_separators(regex, s):
    matches = list(filter(None, regex.split(s)))
    return matches

regex = re.compile(r"([\(\)])")
matches = split_with_separators(regex, s)  # s is your input string
Both ways will also remove empty groups, which are useless and annoying in most cases.
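For example, with an assumed input string, both helpers above should give the same result:
>>> s = "(foo)bar(baz)"
>>> finditer_with_separators(re.compile(r"[\(\)]"), s)
['(', 'foo', ')', 'bar', '(', 'baz', ')']
>>> split_with_separators(re.compile(r"([\(\)])"), s)
['(', 'foo', ')', 'bar', '(', 'baz', ')']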
Install wrs ("WITHOUT REMOVING SPLITOR", developed by Rao Hamza) by doing:
pip install wrs
import wrs
text = "Now inbox “how to make spam ad” Invest in hard email marketing."
splitor = 'email | spam | inbox'
list = wrs.wr_split(splitor, text)
print(list)
result:
['now ', 'inbox “how to make ', 'spam ad” invest in hard ', 'email marketing.']
I had a similar issue trying to split a file path and struggled to find a simple answer.
This worked for me and didn't involve having to substitute delimiters back into the split text:
my_path = 'folder1/folder2/folder3/file1'
import re
re.findall('[^/]+/|[^/]+', my_path)
returns:
['folder1/', 'folder2/', 'folder3/', 'file1']
I found this generator based approach more satisfying:
def split_keep(string, sep):
    """Usage:
    >>> list(split_keep("a.b.c.d", "."))
    ['a.', 'b.', 'c.', 'd']
    """
    start = 0
    while True:
        end = string.find(sep, start) + 1
        if end == 0:
            break
        yield string[start:end]
        start = end
    yield string[start:]
It avoids the need to figure out the correct regex and in theory should be fairly cheap. It doesn't create new string objects and delegates most of the iteration work to the efficient find method.
... and in Python 3.8 it can be as short as:
def split_keep(string, sep):
    start = 0
    while (end := string.find(sep, start) + 1) > 0:
        yield string[start:end]
        start = end
    yield string[start:]
May I just leave it here
s = 'foo/bar spam\neggs'
print(s.replace('/', '+++/+++').replace(' ', '+++ +++').replace('\n', '+++\n+++').split('+++'))
['foo', '/', 'bar', ' ', 'spam', '\n', 'eggs']
If you use re.split, your regular expression comes from a variable, and you have multiple separators, you can do the following:
import re

# BashSpecialParamList holds the Bash special parameters,
# i.e. the separators are the Bash special parameters
BashSpecialParamList = ["$*", "$@", "$#", "$?", "$-", "$$", "$!", "$0"]
# aStr is the string to be split
aStr = "$a Klkjfd$0 $? $#%$*Sdfdf"

reStr = "|".join([re.escape(sepStr) for sepStr in BashSpecialParamList])
re.split(f'({reStr})', aStr)

# Then you get the result:
# ['$a Klkjfd', '$0', ' ', '$?', ' ', '$#', '%', '$*', 'Sdfdf']
reference: GNU Bash Special Parameters
Some of the answers posted before will repeat the delimiter, or have some other bugs which I faced in my case. You can use this function instead:
def split_and_keep_delimiter(input, delimiter):
    result = []
    while delimiter in input:
        idx = input.index(delimiter)
        result.append(input[0:idx + len(delimiter)])
        input = input[idx + len(delimiter):]
    result.append(input)
    return result
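For example (an assumed input, not from the question):
>>> split_and_keep_delimiter('foo/bar/baz', '/')
['foo/', 'bar/', 'baz']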
Below is a simple, very efficient, and well-tested answer to this question. The code has comments explaining everything in it.
I promise it's not as scary as it looks - it's actually only 13 lines of code! The rest are all comments, docs and assertions
def split_including_delimiters(input: str, delimiter: str):
    """
    Splits an input string, while including the delimiters in the output

    Unlike str.split, we can use an empty string as a delimiter
    Unlike str.split, the output will not have any extra empty strings
    Consequently, len(split_including_delimiters('', delimiter)) == 0 for all delimiters,
    whereas len(input.split(delimiter)) > 0 for all inputs and delimiters

    INPUTS:
        input: Can be any string
        delimiter: Can be any string

    EXAMPLES:
        >>> split_including_delimiters('Hello World ! ', ' ')
        ans = ['Hello ', 'World ', ' ', '! ', ' ']
        >>> split_including_delimiters("Hello**World**!***", "**")
        ans = ['Hello', '**', 'World', '**', '!', '**', '*']

    EXAMPLES:
        assert split_including_delimiters('-xx-xx-', 'xx') == ['-', 'xx', '-', 'xx', '-']  # length 5
        assert split_including_delimiters('xx-xx-' , 'xx') == ['xx', '-', 'xx', '-']       # length 4
        assert split_including_delimiters('-xx-xx' , 'xx') == ['-', 'xx', '-', 'xx']       # length 4
        assert split_including_delimiters('xx-xx'  , 'xx') == ['xx', '-', 'xx']            # length 3
        assert split_including_delimiters('xxxx'   , 'xx') == ['xx', 'xx']                 # length 2
        assert split_including_delimiters('xxx'    , 'xx') == ['xx', 'x']                  # length 2
        assert split_including_delimiters('x'      , 'xx') == ['x']                        # length 1
        assert split_including_delimiters(''       , 'xx') == []                           # length 0
        assert split_including_delimiters('aaa'    , 'xx') == ['aaa']                      # length 1
        assert split_including_delimiters('aa'     , 'xx') == ['aa']                       # length 1
        assert split_including_delimiters('a'      , 'xx') == ['a']                        # length 1
        assert split_including_delimiters(''       , ''  ) == []                           # length 0
        assert split_including_delimiters('a'      , ''  ) == ['a']                        # length 1
        assert split_including_delimiters('aa'     , ''  ) == ['a', '', 'a']               # length 3
        assert split_including_delimiters('aaa'    , ''  ) == ['a', '', 'a', '', 'a']      # length 5
    """
    # Input assertions
    assert isinstance(input, str), "input must be a string"
    assert isinstance(delimiter, str), "delimiter must be a string"

    if delimiter:
        # These tokens do not include the delimiter, but are computed quickly
        tokens = input.split(delimiter)
    else:
        # Edge case: if the delimiter is the empty string, split between the characters
        tokens = list(input)

    # The following assertion is always true for any string input and delimiter
    # For speed's sake, we disable this assertion
    # assert delimiter.join(tokens) == input

    output = tokens[:1]

    for token in tokens[1:]:
        output.append(delimiter)
        if token:
            output.append(token)

    # Don't let the first element be an empty string
    if output[:1] == ['']:
        del output[0]

    # The only case where we should have an empty string in the output is if it is our delimiter
    # For speed's sake, we disable this assertion
    # assert delimiter == '' or '' not in output

    # The resulting strings should be combinable back into the original string
    # For speed's sake, we disable this assertion
    # assert ''.join(output) == input

    return output
>>> line = 'hello_toto_is_there'
>>> sep = '_'
>>> [sep + x[1] if x[0] != 0 else x[1] for x in enumerate(line.split(sep))]
['hello', '_toto', '_is', '_there']
