Python ValueError: too many values to unpack (expected 3)

I am getting that exception from this code:
from collections import Counter, ChainMap
from itertools import chain
import re
import pickle

class EmoLex(object):
    def __init__(self, emolex_filepath=None):
        if emolex_filepath:
            with open(emolex_filepath) as emolex_file:
                self.parser = self._load_and_parse(emolex_file)

    def __len__(self):
        return len(self.keys())

    def keys(self):
        return self._parser_keys()

    def _parser_keys(self):
        return self.parser.keys

    def categorize_token(self, token):
        return self.parser[token.lower()]

    def annotate_doc(self, doc):
        return [ self.categorize_token(word.lower()) for word in doc ]

    def summarize_doc(self, doc):
        annotation = self.annotate_doc(doc)
        # return just the summarization
        return self.summarize_annotation(annotation, doc)

    def summarize_annotation(self, annotation, doc):
        wc = len([w for w in doc if re.match('\w+', w)])
        ctr = Counter(list(self._flatten_list_of_sets(annotation)))
        # Convert to percentiles
        summary = {k: float(v)/float(wc) for (k,v) in dict(ctr).items()}
        # Set keys that did not occur to 0
        not_counted = { k: 0.0 for k in
                        self._parser_keys() - set(summary.keys()) }
        # Merge the two dictionaries
        return dict(ChainMap(summary, not_counted))

    def load(self, pickle_filepath):
        with open(pickle_filepath, 'rb') as pickle_file:
            self.parser = pickle.load(pickle_file)

    def dump(self, pickle_filepath):
        with open(pickle_filepath, 'wb') as pickle_file:
            pickle.dump(self.parser, pickle_file)

    # l_of_s: List[Set[str]] -> generator of List[str]
    def _flatten_list_of_sets(self, l_of_s):
        return chain.from_iterable([ list(categories)
                                     for categories in l_of_s ])

    def _load_and_parse(self, emolex_file):
        return NrcDiscreteParser(emolex_file.read().splitlines())

#EmoLex(emolex_filepath="/Users/sakshigupta/Dropbox/Sakshi_July_2021/reports_Old_and_New/NRC-VAD-Lexicon.txt")
lexicon = EmoLex("/Users/sakshigupta/Dropbox/Sakshi_July_2021/reports_Old_and_New/NRC-VAD-Lexicon.txt")

Which function are you calling?
This exception is raised when the number of returned values is greater than the number of variables you are unpacking into.
Check the example given below:
def dummy_function():
    return 1, 2, 3
Calling the above function with only two target variables:
a, b = dummy_function()
I get this:
ValueError: too many values to unpack (expected 2)
This means that on the caller's end I am expecting only two values, but the function is returning more than two.
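To fix it, either make the number of targets match the number of returned values, or collect the extras with extended unpacking, for example:
a, b, c = dummy_function()       # three targets for the three returned values
a, b, *rest = dummy_function()   # or: a == 1, b == 2, rest == [3]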

Related

Python ThreadPoolExecutor is faster than a loop for a CPU-bound task. How come?

Recently I've been working on a project and found behaviour that I don't understand. We have an endpoint that fetches documents from MongoDB and then applies a transformation to each document, replacing some symbols in the data using regex. What's bothering me is that for 7400 documents, applying the transformation function in a regular loop takes 6 seconds to finish, while using ThreadPoolExecutor.map finishes in 3.4 seconds. As far as I know, the GIL prevents the Python interpreter from running more than one thread simultaneously, so CPU-bound tasks should run slower in a ThreadPoolExecutor than in a regular loop. But that is not the case here. How so? I'm assuming that re operations somehow release the GIL, but I'm not sure. Here is the code:
# parts that are responsible for substituting symbols in mongo document
# (Mapping, Iterable, ThreadPoolExecutor and the rec / as_datetime / filter_converter
# helpers are imported or defined elsewhere in the project)
class Converter:
    def __init__(self, reverse=False):
        self.reverse = reverse

    def key_convert(self, key, reverse, path):
        return key

    def value_convert(self, value, reverse, path):
        return value

    def recurse(self, data, path=tuple()):
        if isinstance(data, Mapping):
            _data = {}
            for k, v in data.items():
                next_path = path + (k,)
                key = self.key_convert(k, self.reverse, path)
                value = self.value_convert(v, self.reverse, next_path)
                _data[key] = self.recurse(value, next_path)
            return _data
        elif isinstance(data, Iterable) and not isinstance(data, (str, bytes)):
            return [
                self.recurse(it, path + (idx,))
                for idx, it in enumerate(data)]
        return self.value_convert(data, self.reverse, path)

    def convert(self, data):
        self.reverse = False
        return self.recurse(data)

    def unconvert(self, data):
        self.reverse = True
        return self.recurse(data)

class MongoConverter(Converter):
    _to_mongo = ((rec(r"\."), "\u00B7"), (rec(r"^\$"), "#"),)
    _from_mongo = ((rec("\u00B7"), "."), (rec("^#"), "$"),)

    def key_convert_to_mongo(self, key):
        return pattern_substitute(key, self._to_mongo)

    def key_convert_from_mongo(self, key):
        return pattern_substitute(key, self._from_mongo)

    def key_convert(self, key, reverse, _):
        if reverse:
            return self.key_convert_from_mongo(key)
        return self.key_convert_to_mongo(key)

    def value_convert(self, value, reverse, path):
        if reverse:
            return value
        return as_datetime(value)

def pattern_substitute(value, pattern_substitutes):
    for pattern, substitute in pattern_substitutes:
        value = pattern.sub(substitute, value)
    return value

# storage adapter
class MongoStorage:
    def __init__(self, collection, converter=None):
        self.collection = collection
        self.converter = converter if converter else MongoConverter()
        self._context = None
        self.executor = ThreadPoolExecutor()

    def after_find(self, data):
        if data is not None:
            return self.converter.unconvert(data)

    def find(self, filtr=None, limit=-1, **kwargs):
        filtr = filter_converter.convert(filtr)
        if limit == 0:
            return []
        if limit == -1:
            limit = 0
        # this part is what I'm asking about. Regular loop here is slower
        return self.executor.map(
            self.after_find,
            self.collection.find(filtr, limit=limit, **kwargs)
        )
Thank you for your answers.
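One thing worth double-checking when timing this: ThreadPoolExecutor.map returns a lazy iterator, so a fair comparison has to consume the results on both sides before stopping the clock. A minimal, self-contained timing sketch (transform and the sample docs below are made-up stand-ins for the real documents and regex work, not the project's code):
import re
import time
from concurrent.futures import ThreadPoolExecutor

DOT = re.compile(r"\.")

def transform(doc):
    # stand-in for the real symbol substitution
    return {k: DOT.sub("\u00B7", v) for k, v in doc.items()}

docs = [{"a": "x.y.z", "b": "q.w.e"} for _ in range(7400)]

start = time.perf_counter()
loop_result = [transform(d) for d in docs]
print("loop:", time.perf_counter() - start)

start = time.perf_counter()
with ThreadPoolExecutor() as executor:
    pool_result = list(executor.map(transform, docs))  # list() forces the lazy iterator to complete
print("ThreadPoolExecutor.map:", time.perf_counter() - start)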

Adding class objects to Pytorch Dataloader: batch must contain tensors

I have a custom Pytorch dataset that returns a dictionary containing a class object "queries".
class QueryDataset(torch.utils.data.Dataset):
    def __init__(self, queries, values, targets):
        super(QueryDataset).__init__()
        self.queries = queries
        self.values = values
        self.targets = targets

    def __len__(self):
        return self.values.shape[0]

    def __getitem__(self, idx):
        sample = DeviceDict({'query': self.queries[idx],
                             "values": self.values[idx],
                             "targets": self.targets[idx]})
        return sample
The problem is that when I put the queries in a data loader I get default_collate: batch must contain tensors, numpy arrays, numbers, dicts or lists; found <class 'query.Query'>. Is there a way to have a class object in my data loader? It blows up at next(iterator) in the code below.
train_queries = QueryDataset(train_queries)
train_loader = torch.utils.data.DataLoader(train_queries,
                                           batch_size=10,
                                           shuffle=True,
                                           drop_last=False)
for i in range(epochs):
    iterator = iter(train_loader)
    for i in range(len(train_loader)):
        batch = next(iterator)
        out = model(batch)
        loss = criterion(out["pred"], batch["targets"])
        self.optimizer.zero_grad()
        loss.sum().backward()
        self.optimizer.step()
You need to define your own collate_fn in order to do this.
A sloppy approach, just to show you how stuff works here, would be something like this:
import torch

class DeviceDict:
    def __init__(self, data):
        self.data = data

    def print_data(self):
        print(self.data)

class QueryDataset(torch.utils.data.Dataset):
    def __init__(self, queries, values, targets):
        super(QueryDataset).__init__()
        self.queries = queries
        self.values = values
        self.targets = targets

    def __len__(self):
        return 5

    def __getitem__(self, idx):
        sample = {'query': self.queries[idx],
                  "values": self.values[idx],
                  "targets": self.targets[idx]}
        return sample

def custom_collate(dict):
    return DeviceDict(dict)

dt = QueryDataset("q", "v", "t")
dl = torch.utils.data.DataLoader(dt, batch_size=1, collate_fn=custom_collate)
t = next(iter(dl))
t.print_data()
Basically, collate_fn allows you to achieve custom batching or add support for custom data types, as explained in the link I previously provided.
As you see it just shows the concept, you need to change it based on your own needs.
For those curious, this is the DeviceDict and custom collate function that I used to get things to work.
class DeviceDict(dict):
    def __init__(self, *args):
        super(DeviceDict, self).__init__(*args)

    def to(self, device):
        dd = DeviceDict()
        for k, v in self.items():
            if torch.is_tensor(v):
                dd[k] = v.to(device)
            else:
                dd[k] = v
        return dd

def collate_helper(elems, key):
    if key == "query":
        return elems
    else:
        return torch.utils.data.dataloader.default_collate(elems)

def custom_collate(batch):
    elem = batch[0]
    return DeviceDict({key: collate_helper([d[key] for d in batch], key) for key in elem})
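For completeness, here is roughly how that collate function plugs into the DataLoader from the question; queries, values, targets, model and device are placeholders from the original code, not defined here:
train_dataset = QueryDataset(queries, values, targets)
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=10,
                                           shuffle=True,
                                           drop_last=False,
                                           collate_fn=custom_collate)
for batch in train_loader:
    batch = batch.to(device)   # DeviceDict.to moves only the tensor values
    out = model(batch)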

Scrambling numbers

I am trying to program an algorithm that scrambles and "unscrambles" integer numbers.
I need two functions, forward and backward:
backward(number): return a "random" number between 0 and 9; the same input number always returns the same output
forward(number): return the input to backward that returns number
I managed to solve the problem like this:
from random import randint

class Scrambler:
    def __init__(self):
        self.mapping = [i for i in range(10)]
        # scramble mapping
        for i in range(1000):
            r1 = randint(0, len(self.mapping) - 1)
            r2 = randint(0, len(self.mapping) - 1)
            temp = self.mapping[r1]
            self.mapping[r1] = self.mapping[r2]
            self.mapping[r2] = temp

    def backward(self, num):
        return self.mapping[num]

    def forward(self, num):
        return self.mapping.index(num)

if __name__ == '__main__':
    s = Scrambler()
    print(s.mapping)
    for i in range(len(s.mapping)):
        print(i, s.forward(i), s.backward(i), s.forward(s.backward(i)), s.backward(s.forward(i)))
Is there a way to do this without using the mapping list?
Can I calculate the return value of the functions forward and backward?
The "randomness" of the numbers does not need to be perfect.
I think your current solution is better than coming up with a function each time. It is a good solution.
Here is a generic solution for a generic key. You'd make your version using the Cipher.random_range method I've stuck on.
import random

class Cipher:
    def __init__(self, key):
        """
        key is a dict of unique values (i.e. bijection)
        """
        if len(set(key.values())) != len(key):
            raise ValueError('key values are not unique')
        self._encoder = key.copy()
        self._decoder = {v: k for k, v in key.items()}

    @classmethod
    def random_range(cls, max):
        lst = list(range(max))
        random.shuffle(lst)
        return cls(dict(enumerate(lst)))

    def encode(self, num):
        return self._encoder[num]

    def decode(self, num):
        return self._decoder[num]
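A quick usage sketch of the class above, mirroring the Scrambler from the question (the actual numbers will differ from run to run):
if __name__ == '__main__':
    cipher = Cipher.random_range(10)
    for i in range(10):
        encoded = cipher.encode(i)
        # decode inverts encode, like forward/backward in the question
        print(i, encoded, cipher.decode(encoded))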

How to efficiently rebuild a pandas HDFStore table when append fails

I am working on using the HDFStore in pandas to store data frames from an ongoing iterative process. At each iteration, I append to a table in the HDFStore. Here is a toy example:
import pandas as pd
from pandas import HDFStore
import numpy as np
from random import choice
from string import ascii_letters

alphanum = np.array(list(ascii_letters) + list(map(str, range(0, 9))))

def hdfstore_append(storefile, key, df, format="t", columns=None, data_columns=None):
    if df is None:
        return
    if key[0] != '/':
        key = '/' + key
    with HDFStore(storefile) as store:
        if key not in store.keys():
            store.put(key, df, format=format, columns=columns, data_columns=data_columns)
        else:
            try:
                store.append(key, df)
            except Exception as inst:
                df = pd.concat([store.get(key), df])
                store.put(key, df, format=format, columns=columns,
                          data_columns=data_columns)

storefile = "db.h5"
for i in range(0, 100):
    df = pd.DataFrame([dict(n=np.random.randn(),
                            s=''.join(alphanum[np.random.randint(1, len(alphanum), np.random.randint(1, 2*(i+1)))]))],
                      index=[i])
    hdfstore_append(storefile, '/SO/df', df, columns=df.columns, data_columns=True)
The hdfstore_append function guards against the various exceptions hdfstore.append throws, and rebuilds the table when necessary. The issue with this approach is that it gets very slow when the table in the store becomes very large.
Is there a more efficient way to do this?
Below is an example of an efficient method for building large pandas HDFStores. The key is to cache the frame numbers when the tables become large. Also, instead of appending, any pre-existing data for a frame is removed first, so each write is essentially a put.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import logging
import os
from abc import ABCMeta, abstractmethod, abstractproperty
import warnings
import pandas as pd
logger = logging.getLogger(__name__)
class FramewiseData(object):
"Abstract base class defining a data container with framewise access."
__metaclass__ = ABCMeta
@abstractmethod
def put(self, df):
pass
@abstractmethod
def get(self, frame_no):
pass
@abstractproperty
def frames(self):
pass
@abstractmethod
def close(self):
pass
@abstractproperty
def t_column(self):
pass
def __getitem__(self, frame_no):
return self.get(frame_no)
def __len__(self):
return len(self.frames)
def dump(self, N=None):
"""Return data from all, or the first N, frames in a single DataFrame
Parameters
----------
N : integer
optional; if None, return all frames
Returns
-------
DataFrame
"""
if N is None:
return pd.concat(iter(self))
else:
i = iter(self)
return pd.concat((next(i) for _ in range(N)))
@property
def max_frame(self):
return max(self.frames)
def _validate(self, df):
if self.t_column not in df.columns:
raise ValueError("Cannot write frame without a column "
"called {0}".format(self.t_column))
if df[self.t_column].nunique() != 1:
raise ValueError("Found multiple values for 'frame'. "
"Write one frame at a time.")
def __iter__(self):
return self._build_generator()
def _build_generator(self):
for frame_no in self.frames:
yield self.get(frame_no)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
KEY_PREFIX = 'Frame_'
len_key_prefix = len(KEY_PREFIX)
def code_key(frame_no):
"Turn the frame_no into a 'natural name' string idiomatic of HDFStore"
key = '{0}{1}'.format(KEY_PREFIX, frame_no)
return key
def decode_key(key):
frame_no = int(key[len_key_prefix:])
return frame_no
class PandasHDFStore(FramewiseData):
"""An interface to an HDF5 file with framewise access, using pandas.
Save each frame's data to a node in a pandas HDFStore.
Any additional keyword arguments to the constructor are passed to
pandas.HDFStore().
"""
def __init__(self, filename, mode='a', t_column='frame', **kwargs):
self.filename = os.path.abspath(filename)
self._t_column = t_column
self.store = pd.HDFStore(self.filename, mode, **kwargs)
@property
def t_column(self):
return self._t_column
@property
def max_frame(self):
return max(self.frames)
def put(self, df):
if len(df) == 0:
warnings.warn('An empty DataFrame was passed to put(). Continuing.')
return
frame_no = df[self.t_column].values[0] # validated to be all the same
key = code_key(frame_no)
# Store data as tabular instead of fixed-format.
# Make sure to remove any pre-existing data, so we don't really 'append'.
try:
self.store.remove(key)
except KeyError:
pass
self.store.put(key, df, format='table')
def get(self, frame_no):
key = code_key(frame_no)
frame = self.store.get(key)
return frame
@property
def frames(self):
"""Returns sorted list of integer frame numbers in file"""
return self._get_frame_nos()
def _get_frame_nos(self):
"""Returns sorted list of integer frame numbers in file"""
# Pandas' store.keys() scans the entire file looking for stored Pandas
# structures. This is very slow for large numbers of frames.
# Instead, scan the root level of the file for nodes with names
# matching our scheme; we know they are DataFrames.
r = [decode_key(key) for key in self.store.root._v_children.keys() if
key.startswith(KEY_PREFIX)]
r.sort()
return r
def close(self):
self.store.close()
class PandasHDFStoreBig(PandasHDFStore):
"""Like PandasHDFStore, but keeps a cache of frame numbers.
This can give a large performance boost when a file contains thousands
of frames.
If a file was made in PandasHDFStore, opening it with this class
and then closing it will add a cache (if mode != 'r').
Any additional keyword arguments to the constructor are passed to
pandas.HDFStore().
"""
def __init__(self, filename, mode='a', t_column='frame', **kwargs):
self._CACHE_NAME = '_Frames_Cache'
self._frames_cache = None
self._cache_dirty = False # Whether _frames_cache needs to be written out
super(PandasHDFStoreBig, self).__init__(filename, mode, t_column,
**kwargs)
@property
def frames(self):
# Hit memory cache, then disk cache
if self._frames_cache is not None:
return self._frames_cache
else:
try:
self._frames_cache = list(self.store[self._CACHE_NAME].index.values)
self._cache_dirty = False
except KeyError:
self._frames_cache = self._get_frame_nos()
self._cache_dirty = True # In memory, but not in file
return self._frames_cache
def put(self, df):
self._invalidate_cache()
super(PandasHDFStoreBig, self).put(df)
def rebuild_cache(self):
"""Delete cache on disk and rebuild it."""
self._invalidate_cache()
_ = self.frames # Compute cache
self._flush_cache()
def _invalidate_cache(self):
self._frames_cache = None
try:
del self.store[self._CACHE_NAME]
except KeyError: pass
def _flush_cache(self):
"""Writes frame cache if dirty and file is writable."""
if (self._frames_cache is not None and self._cache_dirty
and self.store.root._v_file._iswritable()):
self.store[self._CACHE_NAME] = pd.DataFrame({'dummy': 1},
index=self._frames_cache)
self._cache_dirty = False
def close(self):
"""Updates cache, writes if necessary, then closes file."""
if self.store.root._v_file._iswritable():
_ = self.frames # Compute cache
self._flush_cache()
super(PandasHDFStoreBig, self).close()
class PandasHDFStoreSingleNode(FramewiseData):
"""An interface to an HDF5 file with framewise access,
using pandas, that is faster for cross-frame queries.
This implementation is more complex than PandasHDFStore,
but it simplifies (speeds up?) cross-frame queries,
like queries for a single probe's entire trajectory.
Any additional keyword arguments to the constructor are passed to
pandas.HDFStore().
"""
def __init__(self, filename, key='FrameData', mode='a', t_column='frame',
use_tabular_copy=False, **kwargs):
self.filename = os.path.abspath(filename)
self.key = key
self._t_column = t_column
self.store = pd.HDFStore(self.filename, mode, **kwargs)
with pd.get_store(self.filename) as store:
try:
store[self.key]
except KeyError:
pass
else:
self._validate_node(use_tabular_copy)
@property
def t_column(self):
return self._t_column
def put(self, df):
if len(df) == 0:
warnings.warn('An empty DataFrame was passed to put(). Continuing.')
return
self._validate(df)
self.store.append(self.key, df, data_columns=True)
def get(self, frame_no):
frame = self.store.select(self.key, '{0} == {1}'.format(
self._t_column, frame_no))
return frame
def dump(self, N=None):
"""Return data from all, or the first N, frames in a single DataFrame
Parameters
----------
N : integer
optional; if None, return all frames
Returns
-------
DataFrame
"""
if N is None:
return self.store.select(self.key)
else:
Nth_frame = self.frames[N - 1]
return self.store.select(self.key, '{0} <= {1}'.format(
self._t_column, Nth_frame))
def close(self):
self.store.close()
def __del__(self):
if hasattr(self, 'store'):
self.close()
@property
def frames(self):
"""Returns sorted list of integer frame numbers in file"""
# I assume one column can fit in memory, which is not ideal.
# Chunking does not seem to be implemented for select_column.
frame_nos = self.store.select_column(self.key, self.t_column).unique()
frame_nos.sort()
return frame_nos
def _validate_node(self, use_tabular_copy):
# The HDFStore might be non-tabular, which means we cannot select a
# subset, and this whole structure will not work.
# For convenience, this can rewrite the table into a tabular node.
if use_tabular_copy:
self.key = _make_tabular_copy(self.filename, self.key)
pandas_type = getattr(getattr(getattr(
self.store._handle.root, self.key, None), '_v_attrs', None),
'pandas_type', None)
if not pandas_type == 'frame_table':
raise ValueError("This node is not tabular. Call with "
"use_tabular_copy=True to proceed.")
def _make_tabular_copy(store, key):
"""Copy the contents nontabular node in a pandas HDFStore
into a tabular node"""
tabular_key = key + '/tabular'
logger.info("Making a tabular copy of %s at %s", (key, tabular_key))
store.append(tabular_key, store.get(key), data_columns=True)
return tabular_key
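Assuming the classes above, usage for the iterative case in the question might look roughly like this (file name and columns are illustrative); each put writes one frame node, and close flushes the frame-number cache:
import numpy as np
import pandas as pd

store = PandasHDFStoreBig('frames.h5')
for frame_no in range(100):
    df = pd.DataFrame({'n': np.random.randn(5),
                       's': list('abcde'),
                       'frame': frame_no})   # one frame number per put()
    store.put(df)

all_data = store.dump()   # concatenates every stored frame
store.close()             # writes the cache so the next open is fast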

Adapt an iterator to behave like a file-like object in Python

I have a generator producing a list of strings. Is there a utility/adapter in Python that could make it look like a file?
For example,
>>> def str_fn():
... for c in 'a', 'b', 'c':
... yield c * 3
...
>>> for s in str_fn():
... print s
...
aaa
bbb
ccc
>>> stream = some_magic_adaptor(str_fn())
>>> while True:
... data = stream.read(4)
... if not data:
... break
... print data
aaab
bbcc
c
Because data may be big and needs to be streamable (each fragment is a few kilobytes, the entire stream is tens of megabytes), I do not want to eagerly evaluate the whole generator before passing it to stream adaptor.
The "correct" way to do this is inherit from a standard Python io abstract base class. However it doesn't appear that Python allows you to provide a raw text class, and wrap this with a buffered reader of any kind.
The best class to inherit from is TextIOBase. Here's such an implementation, handling readline, and read while being mindful of performance. (gist)
import io

class StringIteratorIO(io.TextIOBase):
    def __init__(self, iter):
        self._iter = iter
        self._left = ''

    def readable(self):
        return True

    def _read1(self, n=None):
        while not self._left:
            try:
                self._left = next(self._iter)
            except StopIteration:
                break
        ret = self._left[:n]
        self._left = self._left[len(ret):]
        return ret

    def read(self, n=None):
        l = []
        if n is None or n < 0:
            while True:
                m = self._read1()
                if not m:
                    break
                l.append(m)
        else:
            while n > 0:
                m = self._read1(n)
                if not m:
                    break
                n -= len(m)
                l.append(m)
        return ''.join(l)

    def readline(self):
        l = []
        while True:
            i = self._left.find('\n')
            if i == -1:
                l.append(self._left)
                try:
                    self._left = next(self._iter)
                except StopIteration:
                    self._left = ''
                    break
            else:
                l.append(self._left[:i+1])
                self._left = self._left[i+1:]
                break
        return ''.join(l)
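A quick usage sketch with the generator from the question:
stream = StringIteratorIO(c * 3 for c in 'abc')
while True:
    data = stream.read(4)
    if not data:
        break
    print(data)   # aaab, then bbcc, then c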
Here's a solution that should read from your iterator in chunks.
class some_magic_adaptor:
    def __init__(self, it):
        self.it = it
        self.next_chunk = ""

    def growChunk(self):
        self.next_chunk = self.next_chunk + self.it.next()

    def read(self, n):
        if self.next_chunk == None:
            return None
        try:
            while len(self.next_chunk) < n:
                self.growChunk()
            rv = self.next_chunk[:n]
            self.next_chunk = self.next_chunk[n:]
            return rv
        except StopIteration:
            rv = self.next_chunk
            self.next_chunk = None
            return rv

def str_fn():
    for c in 'a', 'b', 'c':
        yield c * 3

ff = some_magic_adaptor(str_fn())
while True:
    data = ff.read(4)
    if not data:
        break
    print data
The problem with StringIO is that you have to load everything into the buffer up front. This can be a problem if the generator is infinite :)
from itertools import chain, islice

class some_magic_adaptor(object):
    def __init__(self, src):
        self.src = chain.from_iterable(src)

    def read(self, n):
        return "".join(islice(self.src, None, n))
Here's a modified version of John and Matt's answer that can read a list/generator of strings and output bytearrays
import itertools as it
from io import TextIOBase

class IterStringIO(TextIOBase):
    def __init__(self, iterable=None):
        iterable = iterable or []
        self.iter = it.chain.from_iterable(iterable)

    def not_newline(self, s):
        return s not in {'\n', '\r', '\r\n'}

    def write(self, iterable):
        to_chain = it.chain.from_iterable(iterable)
        self.iter = it.chain.from_iterable([self.iter, to_chain])

    def read(self, n=None):
        return bytearray(it.islice(self.iter, None, n))

    def readline(self, n=None):
        to_read = it.takewhile(self.not_newline, self.iter)
        return bytearray(it.islice(to_read, None, n))
usage:
ff = IterStringIO(c * 3 for c in ['a', 'b', 'c'])
while True:
    data = ff.read(4)
    if not data:
        break
    print data
aaab
bbcc
c
alternate usage:
ff = IterStringIO()
ff.write('ddd')
ff.write(c * 3 for c in ['a', 'b', 'c'])
while True:
    data = ff.read(4)
    if not data:
        break
    print data
ddda
aabb
bccc
There is one called werkzeug.contrib.iterio.IterIO, but note that it stores the entire iterator contents in memory (up to the point you have read it as a file), so it might not be suitable.
http://werkzeug.pocoo.org/docs/contrib/iterio/
Source: https://github.com/mitsuhiko/werkzeug/blob/master/werkzeug/contrib/iterio.py
An open bug on readline/iter: https://github.com/mitsuhiko/werkzeug/pull/500
Looking at Matt's answer, I can see that it's not always necessary to implement all the read methods. read1 may be sufficient, which is described as:
Read and return up to size bytes, with at most one call to the underlying raw stream’s read()...
Then it can be wrapped with io.TextIOWrapper which, for instance, has an implementation of readline. As an example, here's streaming of a CSV file from S3 (Amazon Simple Storage Service) via boto.s3.key.Key, which implements an iterator for reading.
import io
import csv
from boto import s3

class StringIteratorIO(io.TextIOBase):
    def __init__(self, iter):
        self._iterator = iter
        self._buffer = ''

    def readable(self):
        return True

    def read1(self, n=None):
        while not self._buffer:
            try:
                self._buffer = next(self._iterator)
            except StopIteration:
                break
        result = self._buffer[:n]
        self._buffer = self._buffer[len(result):]
        return result

conn = s3.connect_to_region('some_aws_region')
bucket = conn.get_bucket('some_bucket')
key = bucket.get_key('some.csv')
fp = io.TextIOWrapper(StringIteratorIO(key))
reader = csv.DictReader(fp, delimiter=';')
for row in reader:
    print(row)
Update
Here's an answer to a related question which looks a little better. It inherits io.RawIOBase and overrides readinto. In Python 3 that's sufficient, so instead of wrapping IterStream in io.BufferedReader one can wrap it in io.TextIOWrapper. In Python 2 read1 is needed, but it can be simply expressed through readinto.
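That answer isn't reproduced here, but a minimal sketch of the readinto idea (assuming an iterable of bytes chunks; str_fn is the generator from the question) would be something along these lines:
import io

class IterStream(io.RawIOBase):
    """Expose an iterable of bytes chunks as an unseekable raw binary stream."""
    def __init__(self, iterable):
        self._iterator = iter(iterable)
        self._leftover = b''

    def readable(self):
        return True

    def readinto(self, b):
        try:
            chunk = self._leftover or next(self._iterator)
        except StopIteration:
            return 0    # signals EOF
        n = len(b)
        output, self._leftover = chunk[:n], chunk[n:]
        b[:len(output)] = output
        return len(output)

# io.RawIOBase derives read() from readinto(); wrap it for text and readline support:
text_stream = io.TextIOWrapper(io.BufferedReader(IterStream(s.encode() for s in str_fn())))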
If you only need a read method, then this can be enough
def to_file_like_obj(iterable, base):
    chunk = base()
    offset = 0
    it = iter(iterable)

    def up_to_iter(size):
        nonlocal chunk, offset
        while size:
            if offset == len(chunk):
                try:
                    chunk = next(it)
                except StopIteration:
                    break
                else:
                    offset = 0
            to_yield = min(size, len(chunk) - offset)
            offset = offset + to_yield
            size -= to_yield
            yield chunk[offset - to_yield:offset]

    class FileLikeObj:
        def read(self, size=-1):
            return base().join(up_to_iter(float('inf') if size is None or size < 0 else size))

    return FileLikeObj()
which can be used for an iterable yielding str
my_file = to_file_like_obj(str_fn(), str)
or if you have an iterable yielding bytes rather than str, and you want a file-like object whose read method returns bytes
my_file = to_file_like_obj(bytes_fn(), bytes)
This pattern has a few nice properties I think:
Not much code, which can be used for both str and bytes
Returns exactly what has been asked for in terms of length, in both of the cases of the iterable yielding small chunks, and big chunks (other than at the end of the iterable)
Does not append str/bytes - so avoids copying
Leverages slicing - so also avoids copying because a slice of a str/bytes that should be the entire instance will return exactly that same instance
For the bytes case, it's enough of a file-like object to pass through to boto3's upload_fileobj for multipart upload to S3
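As a concrete sketch of that last point, the bytes version can be handed straight to boto3 for a multipart upload; the bucket and key names here are placeholders, and bytes_fn stands for any iterable of bytes chunks as above:
import boto3

s3 = boto3.client('s3')
my_file = to_file_like_obj(bytes_fn(), bytes)
s3.upload_fileobj(my_file, 'my-bucket', 'my-key')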
This is exactly what StringIO is for:
>>> import StringIO
>>> some_var = StringIO.StringIO("Hello World!")
>>> some_var.read(4)
'Hell'
>>> some_var.read(4)
'o Wo'
>>> some_var.read(4)
'rld!'
>>>
Or if you want to do what it sounds like:
class MyString(StringIO.StringIO):
    def __init__(self, *args):
        StringIO.StringIO.__init__(self, "".join(args))
then you can simply do
xx = MyString(*list_of_strings)
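Reading from it then behaves just like the plain StringIO example above:
>>> xx = MyString("aaa", "bbb", "ccc")
>>> xx.read(4)
'aaab'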
