I was loading the InceptionV3 model from Keras for the first time, and it took a long time on my low-powered machine. That got me wondering: which program is responsible for calculating the ETA and displaying the progress bar?
InceptionV3_base_model = InceptionV3(weights='imagenet', include_top=False)
>>
Downloading data from https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5
37036032/87910968 [===========>..................] - ETA: 37s
Which program is calculating and displaying these? Is it Keras, Jupyter, or Linux itself?
Take keras.datasets.mnist as an example, since it also shows a progress bar.
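Loading it once is enough to see the bar; a minimal sketch (the download happens only on the first call, after which the file is cached under ~/.keras/datasets):
from keras.datasets import mnist
# The first call downloads mnist.npz and renders the same progress bar:
(x_train, y_train), (x_test, y_test) = mnist.load_data()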
Source code:
"""MNIST handwritten digits dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ..utils.data_utils import get_file
import numpy as np
def load_data(path='mnist.npz'):
"""Loads the MNIST dataset.
# Arguments
path: path where to cache the dataset locally
(relative to ~/.keras/datasets).
# Returns
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
path = get_file(path,
origin='https://s3.amazonaws.com/img-datasets/mnist.npz',
file_hash='8a61469f7ea1b51cbae51d4f78837e45')
with np.load(path, allow_pickle=True) as f:
x_train, y_train = f['x_train'], f['y_train']
x_test, y_test = f['x_test'], f['y_test']
return (x_train, y_train), (x_test, y_test)
And we know the bar comes from ..utils.data_utils.get_file. Calling it directly shows the same bar.
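A minimal sketch, reusing the MNIST URL quoted above (any URL would do):
from keras.utils.data_utils import get_file
# Downloads to ~/.keras/datasets/mnist.npz, drawing the progress bar
# while the transfer is in flight:
path = get_file('mnist.npz',
                origin='https://s3.amazonaws.com/img-datasets/mnist.npz',
                file_hash='8a61469f7ea1b51cbae51d4f78837e45')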
keras/utils/__init__.py looks like this:
from __future__ import absolute_import
from . import np_utils
from . import generic_utils
from . import data_utils
from . import io_utils
from . import conv_utils
from . import losses_utils
from . import metrics_utils
# Globally-importable utils.
from .io_utils import HDF5Matrix
from .io_utils import H5Dict
from .data_utils import get_file
from .data_utils import Sequence
from .data_utils import GeneratorEnqueuer
from .data_utils import OrderedEnqueuer
from .generic_utils import CustomObjectScope
from .generic_utils import custom_object_scope
from .generic_utils import get_custom_objects
from .generic_utils import serialize_keras_object
from .generic_utils import deserialize_keras_object
from .generic_utils import Progbar
from .layer_utils import convert_all_kernels_in_model
from .layer_utils import get_source_inputs
from .layer_utils import print_summary
from .vis_utils import model_to_dot
from .vis_utils import plot_model
from .np_utils import to_categorical
from .np_utils import normalize
from .multi_gpu_utils import multi_gpu_model
get_file comes from keras.utils.data_utils.
keras/utils/data_utils.py:
"""Utilities for file download and caching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import multiprocessing as mp
import os
import random
import shutil
import sys
import tarfile
import threading
import time
import warnings
import zipfile
from abc import abstractmethod
from contextlib import closing
from multiprocessing.pool import ThreadPool
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen
try:
import queue
except ImportError:
import Queue as queue
from ..utils.generic_utils import Progbar
if sys.version_info[0] == 2:
def urlretrieve(url, filename, reporthook=None, data=None):
"""Replacement for `urlretrieve` for Python 2.
Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
`urllib` module, known to have issues with proxy management.
# Arguments
url: url to retrieve.
filename: where to store the retrieved data locally.
reporthook: a hook function that will be called once
on establishment of the network connection and once
after each block read thereafter.
The hook will be passed three arguments;
a count of blocks transferred so far,
a block size in bytes, and the total size of the file.
data: `data` argument passed to `urlopen`.
"""
def chunk_read(response, chunk_size=8192, reporthook=None):
content_type = response.info().get('Content-Length')
total_size = -1
if content_type is not None:
total_size = int(content_type.strip())
count = 0
while True:
chunk = response.read(chunk_size)
count += 1
if reporthook is not None:
reporthook(count, chunk_size, total_size)
if chunk:
yield chunk
else:
break
with closing(urlopen(url, data)) as response, open(filename, 'wb') as fd:
for chunk in chunk_read(response, reporthook=reporthook):
fd.write(chunk)
else:
from six.moves.urllib.request import urlretrieve
def _extract_archive(file_path, path='.', archive_format='auto'):
"""Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
# Arguments
file_path: path to the archive file
path: path to extract the archive file
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
# Returns
True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format == 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
for archive_type in archive_format:
if archive_type == 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError,
KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
def get_file(fname,
origin,
untar=False,
md5_hash=None,
file_hash=None,
cache_subdir='datasets',
hash_algorithm='auto',
extract=False,
archive_format='auto',
cache_dir=None):
"""Downloads a file from a URL if it not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.keras/datasets/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
# Arguments
fname: Name of the file. If an absolute path `/path/to/file.txt` is
specified the file will be saved at that location.
origin: Original URL of the file.
untar: Deprecated in favor of 'extract'.
boolean, whether the file should be decompressed
md5_hash: Deprecated in favor of 'file_hash'.
md5 hash of the file for verification
file_hash: The expected hash string of the file after download.
The sha256 and md5 hash algorithms are both supported.
cache_subdir: Subdirectory under the Keras cache dir where the file is
saved. If an absolute path `/path/to/folder` is
specified the file will be saved at that location.
hash_algorithm: Select the hash algorithm to verify the file.
options are 'md5', 'sha256', and 'auto'.
The default 'auto' detects the hash algorithm in use.
extract: True tries extracting the file as an Archive, like tar or zip.
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
cache_dir: Location to store cached files, when None it
defaults to the [Keras Directory](/faq/#where-is-the-keras-configuration-filed-stored).
# Returns
Path to the downloaded file
""" # noqa
if cache_dir is None:
if 'KERAS_HOME' in os.environ:
cache_dir = os.environ.get('KERAS_HOME')
else:
cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = 'md5'
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join('/tmp', '.keras')
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
if untar:
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + '.tar.gz'
else:
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
print('A local file was found, but it seems to be '
'incomplete or outdated because the ' + hash_algorithm +
' file hash does not match the original value of ' +
file_hash + ' so we will re-download the data.')
download = True
else:
download = True
if download:
print('Downloading data from', origin)
class ProgressTracker(object):
# Maintain progbar for the lifetime of download.
# This design was chosen for Python 2.7 compatibility.
progbar = None
def dl_progress(count, block_size, total_size):
if ProgressTracker.progbar is None:
if total_size == -1:
total_size = None
ProgressTracker.progbar = Progbar(total_size)
else:
ProgressTracker.progbar.update(count * block_size)
error_msg = 'URL fetch failure on {} : {} -- {}'
try:
try:
urlretrieve(origin, fpath, dl_progress)
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt):
if os.path.exists(fpath):
os.remove(fpath)
raise
ProgressTracker.progbar = None
if untar:
if not os.path.exists(untar_fpath):
_extract_archive(fpath, datadir, archive_format='tar')
return untar_fpath
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
# Example
```python
>>> from keras.utils.data_utils import _hash_file
>>> _hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
# Arguments
fpath: path to the file being validated
algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
# Returns
The file hash
"""
if (algorithm == 'sha256') or (algorithm == 'auto' and len(hash) == 64):
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
"""Validates a file against a sha256 or md5 hash.
# Arguments
fpath: path to the file being validated
file_hash: The expected hash string of the file.
The sha256 and md5 hash algorithms are both supported.
algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
# Returns
Whether the file is valid
"""
if ((algorithm == 'sha256') or
(algorithm == 'auto' and len(file_hash) == 64)):
hasher = 'sha256'
else:
hasher = 'md5'
if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):
return True
else:
return False
class Sequence(object):
"""Base object for fitting to a sequence of data, such as a dataset.
Every `Sequence` must implement the `__getitem__` and the `__len__` methods.
If you want to modify your dataset between epochs you may implement
`on_epoch_end`. The method `__getitem__` should return a complete batch.
# Notes
`Sequence` are a safer way to do multiprocessing. This structure guarantees
that the network will only train once on each sample per epoch which is not
the case with generators.
# Examples
```python
from skimage.io import imread
from skimage.transform import resize
import numpy as np
# Here, `x_set` is list of path to the images
# and `y_set` are the associated classes.
class CIFAR10Sequence(Sequence):
def __init__(self, x_set, y_set, batch_size):
self.x, self.y = x_set, y_set
self.batch_size = batch_size
def __len__(self):
return int(np.ceil(len(self.x) / float(self.batch_size)))
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
return np.array([
resize(imread(file_name), (200, 200))
for file_name in batch_x]), np.array(batch_y)
```
"""
use_sequence_api = True
@abstractmethod
def __getitem__(self, index):
"""Gets batch at position `index`.
# Arguments
index: position of the batch in the Sequence.
# Returns
A batch
"""
raise NotImplementedError
@abstractmethod
def __len__(self):
"""Number of batch in the Sequence.
# Returns
The number of batches in the Sequence.
"""
raise NotImplementedError
def on_epoch_end(self):
"""Method called at the end of every epoch.
"""
pass
def __iter__(self):
"""Create a generator that iterate over the Sequence."""
for item in (self[i] for i in range(len(self))):
yield item
# Global variables to be shared across processes
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.
_SEQUENCE_COUNTER = None
def init_pool(seqs):
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = seqs
def get_index(uid, i):
"""Get the value from the Sequence `uid` at index `i`.
To allow multiple Sequences to be used at the same time, we use `uid` to
get a specific one. A single Sequence would cause the validation to
overwrite the training Sequence.
# Arguments
uid: int, Sequence identifier
i: index
# Returns
The value at index `i`.
"""
return _SHARED_SEQUENCES[uid][i]
class SequenceEnqueuer(object):
"""Base class to enqueue inputs.
The task of an Enqueuer is to use parallelism to speed up preprocessing.
This is done with processes or threads.
# Examples
```python
enqueuer = SequenceEnqueuer(...)
enqueuer.start()
datas = enqueuer.get()
for data in datas:
# Use the inputs; training, evaluating, predicting.
# ... stop sometime.
enqueuer.close()
```
The `enqueuer.get()` should be an infinite stream of datas.
"""
def __init__(self, sequence,
use_multiprocessing=False):
self.sequence = sequence
self.use_multiprocessing = use_multiprocessing
global _SEQUENCE_COUNTER
if _SEQUENCE_COUNTER is None:
try:
_SEQUENCE_COUNTER = mp.Value('i', 0)
except OSError:
# In this case the OS does not allow us to use
# multiprocessing. We resort to an int
# for enqueuer indexing.
_SEQUENCE_COUNTER = 0
if isinstance(_SEQUENCE_COUNTER, int):
self.uid = _SEQUENCE_COUNTER
_SEQUENCE_COUNTER += 1
else:
# Doing Multiprocessing.Value += x is not process-safe.
with _SEQUENCE_COUNTER.get_lock():
self.uid = _SEQUENCE_COUNTER.value
_SEQUENCE_COUNTER.value += 1
self.workers = 0
self.executor_fn = None
self.queue = None
self.run_thread = None
self.stop_signal = None
def is_running(self):
return self.stop_signal is not None and not self.stop_signal.is_set()
def start(self, workers=1, max_queue_size=10):
"""Start the handler's workers.
# Arguments
workers: number of worker threads
max_queue_size: queue size
(when full, workers could block on `put()`)
"""
if self.use_multiprocessing:
self.executor_fn = self._get_executor_init(workers)
else:
# We do not need the init since it's threads.
self.executor_fn = lambda _: ThreadPool(workers)
self.workers = workers
self.queue = queue.Queue(max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
def _send_sequence(self):
"""Send current Iterable to all workers."""
# For new processes that may spawn
_SHARED_SEQUENCES[self.uid] = self.sequence
def stop(self, timeout=None):
"""Stops running threads and wait for them to exit, if necessary.
Should be called by the same thread which called `start()`.
# Arguments
timeout: maximum time to wait on `thread.join()`
"""
self.stop_signal.set()
with self.queue.mutex:
self.queue.queue.clear()
self.queue.unfinished_tasks = 0
self.queue.not_full.notify()
self.run_thread.join(timeout)
_SHARED_SEQUENCES[self.uid] = None
@abstractmethod
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
raise NotImplementedError
@abstractmethod
def _get_executor_init(self, workers):
"""Get the Pool initializer for multiprocessing.
# Returns
Function, a Function to initialize the pool
"""
raise NotImplementedError
@abstractmethod
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Returns
Generator yielding tuples `(inputs, targets)`
or `(inputs, targets, sample_weights)`.
"""
raise NotImplementedError
class OrderedEnqueuer(SequenceEnqueuer):
"""Builds a Enqueuer from a Sequence.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
sequence: A `keras.utils.data_utils.Sequence` object.
use_multiprocessing: use multiprocessing if True, otherwise threading
shuffle: whether to shuffle the data at the beginning of each epoch
"""
def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing)
self.shuffle = shuffle
self.end_of_epoch_signal = threading.Event()
def _get_executor_init(self, workers):
"""Get the Pool initializer for multiprocessing.
# Returns
Function, a Function to initialize the pool
"""
return lambda seqs: mp.Pool(workers,
initializer=init_pool,
initargs=(seqs,))
def _wait_queue(self):
"""Wait for the queue to be empty."""
while True:
time.sleep(0.1)
if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
return
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
while True:
sequence = list(range(len(self.sequence)))
self._send_sequence() # Share the initial sequence
if self.shuffle:
random.shuffle(sequence)
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
for i in sequence:
if self.stop_signal.is_set():
return
future = executor.apply_async(get_index, (self.uid, i))
future.idx = i
self.queue.put(future, block=True)
# Done with the current epoch, waiting for the final batches
self._wait_queue()
if self.stop_signal.is_set():
# We're done
return
# Call the internal on epoch end.
self.sequence.on_epoch_end()
# communicate on_epoch_end to the main thread
self.end_of_epoch_signal.set()
def join_end_of_epoch(self):
self.end_of_epoch_signal.wait(timeout=30)
self.end_of_epoch_signal.clear()
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Yields
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
try:
while self.is_running():
try:
future = self.queue.get(block=True)
inputs = future.get(timeout=30)
except mp.TimeoutError:
idx = future.idx
warnings.warn(
'The input {} could not be retrieved.'
' It could be because a worker has died.'.format(idx),
UserWarning)
inputs = self.sequence[idx]
finally:
self.queue.task_done()
if inputs is not None:
yield inputs
except Exception:
self.stop()
six.reraise(*sys.exc_info())
def init_pool_generator(gens, random_seed=None):
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = gens
if random_seed is not None:
ident = mp.current_process().ident
np.random.seed(random_seed + ident)
def next_sample(uid):
"""Get the next value from the generator `uid`.
To allow multiple generators to be used at the same time, we use `uid` to
get a specific one. A single generator would cause the validation to
overwrite the training generator.
# Arguments
uid: int, generator identifier
# Returns
The next value of generator `uid`.
"""
return six.next(_SHARED_SEQUENCES[uid])
class GeneratorEnqueuer(SequenceEnqueuer):
"""Builds a queue out of a data generator.
The provided generator can be finite in which case the class will throw
a `StopIteration` exception.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
sequence: a sequence function which yields data
use_multiprocessing: use multiprocessing if True, otherwise threading
wait_time: time to sleep in-between calls to `put()`
random_seed: Initial seed for workers,
will be incremented by one for each worker.
"""
def __init__(self, sequence, use_multiprocessing=False, wait_time=None,
random_seed=None):
super(GeneratorEnqueuer, self).__init__(sequence, use_multiprocessing)
self.random_seed = random_seed
if wait_time is not None:
warnings.warn('`wait_time` is not used anymore.',
DeprecationWarning)
def _get_executor_init(self, workers):
"""Get the Pool initializer for multiprocessing.
# Returns
Function, a Function to initialize the pool
"""
return lambda seqs: mp.Pool(workers,
initializer=init_pool_generator,
initargs=(seqs, self.random_seed))
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
self._send_sequence() # Share the initial generator
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
while True:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(next_sample, (self.uid,)), block=True)
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Yields
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
try:
while self.is_running():
try:
future = self.queue.get(block=True)
inputs = future.get(timeout=30)
self.queue.task_done()
except mp.TimeoutError:
warnings.warn(
'An input could not be retrieved.'
' It could be because a worker has died.'
'We do not have any information on the lost sample.',
UserWarning)
continue
if inputs is not None:
yield inputs
except StopIteration:
# Special case for finite generators
last_ones = []
while self.queue.qsize() > 0:
last_ones.append(self.queue.get(block=True))
# Wait for them to complete
list(map(lambda f: f.wait(), last_ones))
# Keep the good ones
last_ones = [future.get() for future in last_ones if future.successful()]
for inputs in last_ones:
if inputs is not None:
yield inputs
except Exception as e:
self.stop()
if 'generator already executing' in str(e):
raise RuntimeError(
"Your generator is NOT thread-safe."
"Keras requires a thread-safe generator when"
"`use_multiprocessing=False, workers > 1`."
"For more information see issue #1638.")
six.reraise(*sys.exc_info())
That's where it comes from: the progress bar in get_file is rendered by keras.utils.generic_utils.Progbar. So it is Keras itself, not Jupyter or Linux, that calculates the ETA and draws the bar.
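You can drive the same Progbar by hand; a minimal sketch, assuming the standalone keras package so the import path above exists:
import time
from keras.utils.generic_utils import Progbar

total = 1000
bar = Progbar(total)        # pass None when the total size is unknown
for done in range(100, total + 1, 100):
    time.sleep(0.1)         # stand-in for downloading one chunk
    bar.update(done)        # Progbar derives the ETA from elapsed time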
I have a script which runs through lines of input to find the occurrence of an ID string while keeping track of the line number.
It then runs backwards up the input to trace parentID/childID relationships. The script accepts either a logfile via a '-f' flag or the contents of stdin from a pipe.
The logfile-as-input portion works just fine, but reading from stdin does not.
For the sake of reasonable clarity I've included only the portion of the script this concerns, so don't expect to be able to run it. It's just to show roughly what's going on (anyone who works in financial services around the FIX protocol will recognize a few things):
import os
import sys
import linecache
from types import *
from ____ import FixMessage # custom message class that is used throughout
# Feel free to ignore all the getArgs and validation crap
def getArgs():
import argparse
parser = argparse.ArgumentParser(
description='Get amendment history.')
parser.add_argument('-f', '--file',
help="input logfile.'")
args = parser.parse_args()
return validateArgs(args)
def validateArgs(args):
try:
if sys.stdin.isatty():
if args.file:
assert os.path.isfile(args.file.strip('\n')), \
'File "{0}" does not exist'.format(args.file)
args.file = open(args.file, 'r')
else:
args.file = sys.stdin
assert args.file, \
"Please either include a file with '-f' or pipe some text in"
except AssertionError as err:
print err
exit(1)
return args
def getMessageTrail(logfile, orderId):
# some input validation
if isinstance(logfile, StringType):
try: logfile = open(logfile, 'r')
except IOError as err: exit(1)
elif not isinstance(logfile, FileType):
raise TypeError(
'Expected FileType and got {0}'.format(type(logfile)))
linenum = 0
# This retrieves the message containing the orderID as well as the linenum
for line in logfile:
linenum += 1
if orderId in line:
# FixMessage is a custom class that is treated here like
# a dictionary with some metadata
# Missing dict keys return 'None'
# .isvalid is bool results of some text validation
# .direction is either incoming or outgoing
# thats all you really need to know
msg = FixMessage(line)
if msg.isvalid and msg.direction == 'Incoming':
yield msg
break
# If there is a message parentID, it would be in msg['41']
if msg['41']:
messages = findParentMessages(logfile, linenum, msg['41'])
for msg in messages: yield msg
def findParentMessages(logfile, startline, targetId):
# Some more input validation
assert isinstance(logfile, FileType)
assert isinstance(startline, IntType)
assert isinstance(targetId, StringType)
# should just make a integer decrementing generator,
# but this is fine for the example
for linenum in range(startline)[::-1]:
# *** This is where the question lies... ***
# print(logfile.name) # returns "<stdin>"
line = linecache.getline(logfile.name, linenum)
if 'Incoming' in line and '11=' + targetId in line:
msg = FixMessage(line)
yield msg
if msg['41']:
    for m in findParentMessages(logfile, linenum, msg['41']): yield m
else: break
def main():
log = getArgs().file
trail = getMessageTrail(log, 'ORDER123')
if __name__ == '__main__': main()
The question is: how does linecache.getline work when it comes to reading stdin as a file? Is it different from how it works when given a regular filename?
linecache.getline() accepts a file name, not a file object, so it is not designed to work that way: internally the filename is handed to calls like open() and os.stat(), and '<stdin>' is not a real path on disk, so the lookup fails and getline quietly returns an empty string.
For reference: https://github.com/python/cpython/blob/2.6/Lib/linecache.py
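If you need random access to piped input, one workaround (a sketch of my own, not part of your script; 'trades.log' is a made-up filename) is to buffer stdin into a list and index that instead:
import sys

def load_lines(source):
    # linecache needs a real file on disk; '<stdin>' is not one, so piped
    # input must be held in memory if earlier lines are to be revisited.
    if source is sys.stdin:
        return sys.stdin.readlines()
    with open(source) as f:
        return f.readlines()

lines = load_lines(sys.stdin if not sys.stdin.isatty() else 'trades.log')
for linenum in range(len(lines) - 1, -1, -1):  # walk backwards, newest first
    line = lines[linenum]
    # ...scan for 'Incoming' and '11=' + targetId here, as in the original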
I'm having trouble receiving and sending data with Python's socket module. My script needs to listen for incoming data on a socket while also reading a FIFO file for a response, and send that response over the socket once it finds a \n. I created a separate thread for reading the FIFO and it works, but sometimes it is really slow. Is it possible to do both things in the main thread? My code:
#!/usr/bin/python
from __future__ import absolute_import, print_function, unicode_literals
from optparse import OptionParser, make_option
import os
import errno
import sys
import socket
import uuid
import dbus
import dbus.service
import dbus.mainloop.glib
import time
from threading import Thread
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
class ArduinoFifo:
fifofile = -1
OUT_PIPE_FILE = '/tmp/ble_pipe_out'
def removeFile(self, filename):
try:
os.remove(filename)
except OSError as e: # this would be "except OSError, e:" before Python 2.6
if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
print(e)
raise # re-raise exception if a different error occured
def createFifo(self):
print('removing pipe file\n')
self.removeFile(self.OUT_PIPE_FILE)
print('making pipe\n')
try:
os.mkfifo(self.OUT_PIPE_FILE, 0777)
except OSError as err:
print (err)
raise
def openFifo(self):
print('waiting to open pipe\n')
try:
self.fifofile = os.open(self.OUT_PIPE_FILE, os.O_WRONLY) # | os.O_NONBLOCK)
except OSError as err:
print (err)
def writeFifo(self, data):
try:
if (self.fifofile == -1):
self.openFifo()
os.write(self.fifofile, data)
except OSError as err:
print (err)
class FIFOReader(Thread):
def __init__(self, server_sock):
super(FIFOReader, self).__init__()
self.server_sock = server_sock
self.daemon = True
self.received_msg = ""
self.cancelled = False
print('remove in fifo')
try:
os.remove("/tmp/ble_pipe_in")
except OSError as e: # this would be "except OSError, e:" before Python 2.6
if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
print(e)
raise
print('create in fifo')
try:
os.mkfifo("/tmp/ble_pipe_in", 0777)
except OSError as err:
print (err)
raise
print('open in fifo')
try:
self.fifofile = os.open("/tmp/ble_pipe_in", os.O_RDWR)
except OSError as err:
print (err)
print('fifo in opened')
def run(self):
while not self.cancelled:
print("READING")
self.received_msg += os.read(self.fifofile, 1)
print("read: %s\n" % self.received_msg)
if "\n" in self.received_msg :
print("Sending Message...")
self.server_sock.send(self.received_msg)
self.received_msg = ""
def cancel(self):
self.cancelled = True
myfifo = ArduinoFifo()
class Profile(dbus.service.Object):
fd = -1
@dbus.service.method("org.bluez.Profile1",
                     in_signature="", out_signature="")
def Release(self):
print("Release")
mainloop.quit()
@dbus.service.method("org.bluez.Profile1",
                     in_signature="", out_signature="")
def Cancel(self):
print("Cancel")
@dbus.service.method("org.bluez.Profile1",
                     in_signature="oha{sv}", out_signature="")
def NewConnection(self, path, fd, properties):
global received_msg
self.fd = fd.take()
print("NewConnection(%s, %d)" % (path, self.fd))
server_sock = socket.fromfd(self.fd, socket.AF_UNIX, socket.SOCK_STREAM)
server_sock.setblocking(1)
myfifo.openFifo()
infifo = FIFOReader(server_sock)
infifo.start()
print('enter recv loop\n')
try:
while True:
data = server_sock.recv(1024)
#print("received: %s" % data)
if data:
myfifo.writeFifo(data)
#if data == "h":
#server_sock.send("Hello!\n")
except IOError as err:
print (err)
pass
server_sock.close()
print("all done")
os.kill(os.getpid(), 9)
@dbus.service.method("org.bluez.Profile1",
                     in_signature="o", out_signature="")
def RequestDisconnection(self, path):
print("RequestDisconnection(%s)" % (path))
if (self.fd > 0):
os.close(self.fd)
self.fd = -1
if __name__ == '__main__':
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
manager = dbus.Interface(bus.get_object("org.bluez",
"/org/bluez"), "org.bluez.ProfileManager1")
option_list = [
make_option("-C", "--channel", action="store",
type="int", dest="channel",
default=None),
]
parser = OptionParser(option_list=option_list)
(options, args) = parser.parse_args()
options.uuid = "1101"
options.psm = "3"
options.role = "server"
options.name = "Edison SPP Loopback"
options.service = "spp char loopback"
options.path = "/foo/bar/profile"
options.auto_connect = False
options.record = ""
profile = Profile(bus, options.path)
mainloop = GObject.MainLoop()
opts = {
"AutoConnect" : options.auto_connect,
}
if (options.name):
opts["Name"] = options.name
if (options.role):
opts["Role"] = options.role
if (options.psm is not None):
opts["PSM"] = dbus.UInt16(options.psm)
if (options.channel is not None):
opts["Channel"] = dbus.UInt16(options.channel)
if (options.record):
opts["ServiceRecord"] = options.record
if (options.service):
opts["Service"] = options.service
if not options.uuid:
options.uuid = str(uuid.uuid4())
manager.RegisterProfile(options.path, options.uuid, opts)
myfifo.createFifo()
mainloop.run()
EDIT: I think the problem is in writing data to the FIFO or receiving incoming data from the socket, because my C code sees the same delay when it reads from the input FIFO with fgets.
EDIT 2: I use this to receive a message and send a response, one right after the other.
I doubt the issue has to do with the separate thread. Threads in Python are real OS-level threads, but in CPython, which most people use, the global interpreter lock means only one thread runs Python bytecode at a time, so for I/O-bound work like this the extra thread costs little. But I do see a couple of possible issues:
I'm not familiar with some of these libs, but os.read(self.fifofile, 1) stands out. If you used the builtin open() (not os.open) or a BufferedReader, the reads would be buffered and therefore fine. But os.open is a low-level call that does not buffer reads or writes, so you are issuing one system call per byte, which is slow and can cause hard-to-trace stalls. Either use a higher-level interface or do the buffering yourself.
Secondly, repeatedly +='ing the read input onto the message string is slow if your Python interpreter creates a new string internally each time. That turns something that should be O(N) in the message size into O(N^2). It depends on the interpreter, so to keep things portable, append chunks to a list and join them when the message is complete, as sketched below.
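Here is a minimal sketch of both fixes together (bigger reads plus list accumulation), written in the question's Python 2 style and reusing its /tmp/ble_pipe_in path and server_sock; like the original loop, it sends everything accumulated once a newline shows up:
import os

fifofile = os.open('/tmp/ble_pipe_in', os.O_RDWR)
chunks = []
while True:
    chunk = os.read(fifofile, 4096)   # up to 4 KiB per system call
    if not chunk:
        break                         # writer closed the FIFO
    chunks.append(chunk)              # O(1) append instead of O(N) concat
    if '\n' in chunk:                 # a full message has arrived
        server_sock.send(''.join(chunks))
        chunks = []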
Unrelated, but if you don't know whether your FIFO file carries text, you shouldn't open it in text mode or you'll run into decode errors: the data must be valid in the file's encoding (UTF-8 by default on Python 3), and a stray byte such as 0xFF will raise an exception. Open it in binary mode instead.
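For example, a short sketch of a buffered, binary-mode reader for the same FIFO:
# Binary mode accepts arbitrary bytes, and the io layer buffers the reads,
# so readline() blocks until a full newline-terminated message arrives.
with open('/tmp/ble_pipe_in', 'rb') as fifo:
    msg = fifo.readline()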
Hope this helps.
I was trying to create a new file using ctypes in Python. The file gets created and I am able to write into it. The problem starts when I try to read from the file: it gives me an invalid file handle error. Does anyone know why this is the case?
The following is the code I used:
from ctypes import *
def CreateFile(file_name='',data=''):
file_handler=windll.Kernel32.CreateFileA(file_name,0x10000000,0,None,4,0x80,None)
pointer_to_written_data=c_int(0)
windll.Kernel32.WriteFile(file_handler,data,len(data),byref(pointer_to_written_data),None)
windll.Kernel32.CloseHandle(file_handler)
return
def ReadAFile(file_name=''):
file_handler=windll.Kernel32.CreateFileA(file_name,0x10000000,0,None,4,0x80,None)
data=create_string_buffer(4096)
pointer_to_read_data=c_int(0)
if(windll.Kernel32.ReadFile(file_handler,byref(data),1024,byref(pointer_to_read_data),None)==0):
print "Failed"
print windll.Kernel32.GetLastError()
windll.Kernel32.CloseHandle(file_handler)
print data.value
return
CreateFile("sample.txt","This is a test file!!")
ReadAFile("sample.txt")
Your code works as-is after the CloseHandle edit.
When you weren't closing the file handle in CreateFile(), the open in ReadAFile failed because the file was still open with a share mode of 0 (no sharing). You didn't check for that error, so your ReadFile call then failed on the invalid handle. Now that CreateFile closes its handle, it works. Note you may need to close and re-open your IDE: since the leak kept a handle open, it stays held until the process dies (I ran into that myself).
I also find it useful in ctypes to be explicit and define argtypes, restype, and errcheck. errcheck especially, since failures then raise an exception and you don't have to check return values by hand:
from ctypes import *
from ctypes import wintypes as w
INVALID_HANDLE_VALUE = w.HANDLE(-1).value
GENERIC_ALL = 0x10000000
OPEN_ALWAYS = 4
FILE_ATTRIBUTE_NORMAL = 0x80
_k32 = WinDLL('kernel32',use_last_error=True)
_CreateFileA = _k32.CreateFileA
_WriteFile = _k32.WriteFile
_ReadFile = _k32.ReadFile
_CloseHandle = _k32.CloseHandle
def validate_handle(result,func,args):
if result == INVALID_HANDLE_VALUE:
raise WindowsError(get_last_error())
return result
def validate_bool(result,func,args):
if not result:
raise WindowsError(get_last_error())
_CreateFileA.argtypes = w.LPCSTR,w.DWORD,w.DWORD,c_void_p,w.DWORD,w.DWORD,w.HANDLE
_CreateFileA.restype = w.HANDLE
_CreateFileA.errcheck = validate_handle
_WriteFile.argtypes = w.HANDLE,c_void_p,w.DWORD,POINTER(w.DWORD),c_void_p
_WriteFile.restype = w.BOOL
_WriteFile.errcheck = validate_bool
_ReadFile.argtypes = w.HANDLE,w.LPVOID,w.DWORD,POINTER(w.DWORD),c_void_p
_ReadFile.restype = w.BOOL
_ReadFile.errcheck = validate_bool
_CloseHandle.argtypes = w.HANDLE,
_CloseHandle.restype = w.BOOL
_CloseHandle.errcheck = validate_bool
def CreateFile(file_name='',data=''):
file_handler = _CreateFileA(file_name,GENERIC_ALL,0,None,OPEN_ALWAYS,FILE_ATTRIBUTE_NORMAL,None)
written = w.DWORD()
try:
_WriteFile(file_handler,data,len(data),byref(written),None)
finally:
_CloseHandle(file_handler)
def ReadAFile(file_name=''):
file_handler = _CreateFileA(file_name,GENERIC_ALL,0,None,OPEN_ALWAYS,FILE_ATTRIBUTE_NORMAL,None)
data = create_string_buffer(4096)
read = w.DWORD()
try:
_ReadFile(file_handler,byref(data),1024,byref(read),None)
finally:
_CloseHandle(file_handler)
print data.value
CreateFile("sample.txt","This is a test file!!")
ReadAFile("sample.txt")