YOLOv5 paging file too small - python

I am trying to train a YOLOv5 CNN on a custom dataset with around 10000 training examples, but I keep getting the following error:
C:\Program Files\Python39\lib\site-packages\setuptools\distutils_patch.py:25: UserWarning: Distutils was imported before Setuptools. This usage is discouraged and may exhibit undesirable behaviors or errors. Please use Setuptools' objects directly or at least import Setuptools first.
warnings.warn(
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Program Files\Python39\lib\multiprocessing\spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "C:\Program Files\Python39\lib\multiprocessing\spawn.py", line 125, in _main
prepare(preparation_data)
File "C:\Program Files\Python39\lib\multiprocessing\spawn.py", line 236, in prepare
_fixup_main_from_path(data['init_main_from_path'])
File "C:\Program Files\Python39\lib\multiprocessing\spawn.py", line 287, in _fixup_main_from_path
main_content = runpy.run_path(main_path,
File "C:\Program Files\Python39\lib\runpy.py", line 268, in run_path
return _run_module_code(code, init_globals, run_name,
File "C:\Program Files\Python39\lib\runpy.py", line 97, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "C:\Program Files\Python39\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "C:\Users\Malth\OneDrive - Aarhus Universitet\7. Semester\DL\YOLO\yolov5\train.py", line 20, in <module>
import torch
File "C:\Users\Malth\AppData\Roaming\Python\Python39\site-packages\torch\__init__.py", line 124, in <module>
raise err
OSError: [WinError 1455] The paging file is too small for this operation to complete. Error loading "C:\Users\Malth\AppData\Roaming\Python\Python39\site-packages\torch\lib\caffe2_detectron_ops_gpu.dll" or one of its dependencies.
Right before the error occurs, the program uses up the entirety of my 30 GB pagefile plus roughly 10 GB of RAM.
I am running train.py with the following command:
python train.py --rect --batch 16 --epochs 3 --data CCPDMini.yaml --weights yolov5s.pt
My python version is 3.9.2
My PyTorch version is 1.9.1+cu111
I have the following setup:
Intel Core i7-4790 CPU
16 GB RAM
RTX2070 8GB VRAM
I have already tried reducing the batch size and the number of dataloader workers, but to no avail.
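For what it's worth, WinError 1455 generally means Windows ran out of commit charge (physical RAM plus pagefile) while mapping the large CUDA DLLs into the spawned dataloader processes, which would match the pagefile exhaustion described above. A rough diagnostic sketch (assuming Windows and plain ctypes, nothing YOLOv5-specific) that prints the commit limit and how much of it is still free right before training starts:
import ctypes

class MEMORYSTATUSEX(ctypes.Structure):
    # field layout of the Win32 MEMORYSTATUSEX struct used by GlobalMemoryStatusEx
    _fields_ = [
        ("dwLength", ctypes.c_ulong),
        ("dwMemoryLoad", ctypes.c_ulong),
        ("ullTotalPhys", ctypes.c_ulonglong),
        ("ullAvailPhys", ctypes.c_ulonglong),
        ("ullTotalPageFile", ctypes.c_ulonglong),   # commit limit (RAM + pagefile)
        ("ullAvailPageFile", ctypes.c_ulonglong),   # commit charge still available
        ("ullTotalVirtual", ctypes.c_ulonglong),
        ("ullAvailVirtual", ctypes.c_ulonglong),
        ("ullAvailExtendedVirtual", ctypes.c_ulonglong),
    ]

status = MEMORYSTATUSEX()
status.dwLength = ctypes.sizeof(MEMORYSTATUSEX)
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(status))
print("commit limit:     %d MiB" % (status.ullTotalPageFile // 2**20))
print("commit available: %d MiB" % (status.ullAvailPageFile // 2**20))
If the available figure is already close to zero when torch is imported in the worker processes, the usual remedies reported for this error are enlarging the pagefile further or letting Windows manage its size automatically, rather than tuning the training flags.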

Related

While running whisper I am facing FileNotFoundError

This is the command I am running from PowerShell:
whisper C:\Users\SAMSUNG\Desktop\aud2.mp3 --language Urdu --task translate --model base
This is the complete error:
UserWarning: FP16 is not supported on CPU; using FP32 instead
warnings.warn("FP16 is not supported on CPU; using FP32 instead")
Traceback (most recent call last):
File "c:\users\samsung\appdata\local\programs\python\python37\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\users\samsung\appdata\local\programs\python\python37\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Users\SAMSUNG\.envs\py3.7\Scripts\whisper.exe\__main__.py", line 7, in <module>
File "C:\Users\SAMSUNG\.envs\py3.7\lib\site-packages\whisper\transcribe.py", line 307, in cli
result = transcribe(model, audio_path, temperature=temperature, **args)
File "C:\Users\SAMSUNG\.envs\py3.7\lib\site-packages\whisper\transcribe.py", line 84, in transcribe
mel = log_mel_spectrogram(audio)
File "C:\Users\SAMSUNG\.envs\py3.7\lib\site-packages\whisper\audio.py", line 111, in log_mel_spectrogram
audio = load_audio(audio)
File "C:\Users\SAMSUNG\.envs\py3.7\lib\site-packages\whisper\audio.py", line 44, in load_audio
.run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
File "C:\Users\SAMSUNG\.envs\py3.7\lib\site-packages\ffmpeg\_run.py", line 320, in run
overwrite_output=overwrite_output,
File "C:\Users\SAMSUNG\.envs\py3.7\lib\site-packages\ffmpeg\_run.py", line 285, in run_async
args, stdin=stdin_stream, stdout=stdout_stream, stderr=stderr_stream
File "c:\users\samsung\appdata\local\programs\python\python37\lib\subprocess.py", line 756, in __init__
restore_signals, start_new_session)
File "c:\users\samsung\appdata\local\programs\python\python37\lib\subprocess.py", line 1155, in _execute_child
startupinfo)
FileNotFoundError: [WinError 2] The system cannot find the file specified
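The last frames before the FileNotFoundError show whisper shelling out to ffmpeg ("ffmpeg", "-nostdin"), so WinError 2 here almost always means the ffmpeg executable itself is missing from PATH, not the audio file. A quick check, as a sketch in plain Python (nothing whisper-specific assumed):
import shutil

# shutil.which returns None when the executable cannot be found on PATH
ffmpeg_path = shutil.which("ffmpeg")
if ffmpeg_path is None:
    print("ffmpeg is not on PATH - whisper needs it to decode the mp3")
else:
    print("ffmpeg found at:", ffmpeg_path)
If it is missing, installing an ffmpeg build, adding its bin folder to PATH, and reopening PowerShell is the usual fix.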

python 3.10 - no libpython3.10.so - pyinstaller fails

I am having a problem with pyinstaller with python 3.10.5 on Ubuntu 18.04. I built Python 3.10.5 from source.
When I run pyinstaller with 3.10 I get the error below. When I run with 3.6 I get:
08:32:09 44362 INFO: Python library not in binary dependencies. Doing additional searching...
08:32:09 44394 INFO: Using Python library /usr/lib/x86_64-linux-gnu/libpython3.6m.so.1.0
Looking on my machine there is no libpython3.10m.so, so I wonder if this is the problem? (There is a libpython3.10.a)
Would having a libpython3.10m.so fix the problem?
Where do I get one from or how do I build one?
pyinstaller/python 3.10 errors:
INFO: Python library not in binary dependencies. Doing additional searching...
Traceback (most recent call last):
File "/usr/local/lib/python3.10/runpy.py", line 196, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/local/lib/python3.10/runpy.py", line 86, in _run_code
exec(code, run_globals)
File "/home/jenkins/.local/lib/python3.10/site-packages/PyInstaller/__main__.py", line 134, in <module>
run()
File "/home/jenkins/.local/lib/python3.10/site-packages/PyInstaller/__main__.py", line 124, in run
run_build(pyi_config, spec_file, **vars(args))
File "/home/jenkins/.local/lib/python3.10/site-packages/PyInstaller/__main__.py", line 58, in run_build
PyInstaller.building.build_main.main(pyi_config, spec_file, **kwargs)
File "/home/jenkins/.local/lib/python3.10/site-packages/PyInstaller/building/build_main.py", line 782, in main
build(specfile, kw.get('distpath'), kw.get('workpath'), kw.get('clean_build'))
File "/home/jenkins/.local/lib/python3.10/site-packages/PyInstaller/building/build_main.py", line 714, in build
exec(code, spec_namespace)
File "/home/jenkins/workspace/TOOLS/tools-helper-build-python3-linux/hosttools/python/maxwell_dm.spec", line 7, in <module>
a = Analysis(['maxwell_dm_gui/app_entry.py'],
File "/home/jenkins/.local/lib/python3.10/site-packages/PyInstaller/building/build_main.py", line 277, in __init__
self.__postinit__()
File "/home/jenkins/.local/lib/python3.10/site-packages/PyInstaller/building/datastruct.py", line 155, in __postinit__
self.assemble()
File "/home/jenkins/.local/lib/python3.10/site-packages/PyInstaller/building/build_main.py", line 501, in assemble
self._check_python_library(self.binaries)
File "/home/jenkins/.local/lib/python3.10/site-packages/PyInstaller/building/build_main.py", line 605, in _check_python_library
python_lib = bindepend.get_python_library_path()
File "/home/jenkins/.local/lib/python3.10/site-packages/PyInstaller/depend/bindepend.py", line 897, in get_python_library_path
python_libname = findLibrary(name)
File "/home/jenkins/.local/lib/python3.10/site-packages/PyInstaller/depend/bindepend.py", line 775, in findLibrary
lib = _which_library(name, paths)
File "/home/jenkins/.local/lib/python3.10/site-packages/PyInstaller/depend/bindepend.py", line 806, in _which_library
for _path in os.listdir(path):
FileNotFoundError: [Errno 2] No such file or directory: '/usr/lib64'
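To answer the "how do I build one" part: a from-source CPython build only produces the shared library when it is configured with --enable-shared; the default configure gives you just the static libpython3.10.a you found. (Since Python 3.8 the "m" ABI suffix is gone, so the file PyInstaller looks for is libpython3.10.so.1.0, not libpython3.10m.so.) A hedged sketch of the rebuild, run from the Python 3.10.5 source directory and assuming the default /usr/local prefix:
./configure --enable-shared
make -j"$(nproc)"
sudo make install
sudo ldconfig    # refresh the linker cache so libpython3.10.so.1.0 can be found
The FileNotFoundError about /usr/lib64 at the bottom of the traceback is just PyInstaller's library search calling os.listdir on a directory that does not exist on Ubuntu while it hunts for libpython; people have reported getting past that step by creating the directory or upgrading PyInstaller, but the missing shared library is the underlying problem.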

fcntl error with “mlflow ui” on windows - mlflow 1.0

I am getting the following error message when trying mlflow examples and running 'mlflow ui'.
Error:
ModuleNotFoundError: No module named 'fcntl'
Running the mlflow server failed. Please see the logs above for details.
Is anyone aware of a solution to this issue?
I have tried the solutions suggested at https://github.com/mlflow/mlflow/pull/1080, without success. After replacing the modified files in the mlflow source code, it raises a different error about not finding what it is looking for:
Traceback (most recent call last):
File "c:\programdata\anaconda3\envs\thesis_mlflow\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\programdata\anaconda3\envs\thesis_mlflow\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\ProgramData\Anaconda3\envs\thesis_mlflow\Scripts\mlflow.exe\__main__.py", line 9, in <module>
File "c:\programdata\anaconda3\envs\thesis_mlflow\lib\site-packages\click\core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "c:\programdata\anaconda3\envs\thesis_mlflow\lib\site-packages\click\core.py", line 717, in main
rv = self.invoke(ctx)
File "c:\programdata\anaconda3\envs\thesis_mlflow\lib\site-packages\click\core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "c:\programdata\anaconda3\envs\thesis_mlflow\lib\site-packages\click\core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "c:\programdata\anaconda3\envs\thesis_mlflow\lib\site-packages\click\core.py", line 555, in invoke
return callback(*args, **kwargs)
File "c:\programdata\anaconda3\envs\thesis_mlflow\lib\site-packages\mlflow\cli.py", line 198, in ui
_run_server(backend_store_uri, default_artifact_root, "127.0.0.1", port, None, 1)
File "c:\programdata\anaconda3\envs\thesis_mlflow\lib\site-packages\mlflow\server\__init__.py", line 90, in _run_server
exec_cmd(full_command, env=env_map, stream_output=True)
File "c:\programdata\anaconda3\envs\thesis_mlflow\lib\site-packages\mlflow\utils\process.py", line 34, in exec_cmd
stdin=subprocess.PIPE, **kwargs)
File "c:\programdata\anaconda3\envs\thesis_mlflow\lib\subprocess.py", line 729, in __init__
restore_signals, start_new_session)
File "c:\programdata\anaconda3\envs\thesis_mlflow\lib\subprocess.py", line 1017, in _execute_child
startupinfo)
FileNotFoundError: [WinError 2] The system cannot find the file specified
Just solved the issue: for some reason, waitress was not installed in the running environment. Since fcntl is a Unix-only module, the patched Windows code path from #1080 serves the UI through waitress instead, so waitress has to be present. After installing it, everything seems to be working fine with the solution #1080 linked above in the question.
Perform the following installation before executing the mlflow ui command:
pip install waitress

Numerous remote errors when following TensorFlow Pets on Google Cloud tutorial

Following instructions in the "Distributed Training on the Oxford-IIIT Pets Dataset on Google Cloud" tutorial on the official TensorFlow Models repo, I'm running into some issues. First, this:
Termination reason: Error.
Traceback (most recent call last):
File "/usr/lib/python2.7/runpy.py", line 174, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/usr/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/root/.local/lib/python2.7/site-packages/object_detection/train.py", line 51, in <module>
from object_detection.builders import model_builder
File "/root/.local/lib/python2.7/site-packages/object_detection/builders/model_builder.py", line 29, in <module>
from object_detection.meta_architectures import ssd_meta_arch
File "/root/.local/lib/python2.7/site-packages/object_detection/meta_architectures/ssd_meta_arch.py", line 32, in <module>
from object_detection.utils import visualization_utils
File "/root/.local/lib/python2.7/site-packages/object_detection/utils/visualization_utils.py", line 25, in <module>
import matplotlib; matplotlib.use('Agg')  # pylint: disable=multiple-statements
ImportError: No module named matplotlib
The takeaway from this was the last part: "No module named matplotlib". Following some advice online, I edited the provided setup.py to add "matplotlib" as a requirement:
REQUIRED_PACKAGES = ['Pillow>=1.0', 'matplotlib']
Running it again, that solved the issue. Odd: you'd assume that, being a tutorial, it wouldn't have that problem. Next, though, it ran into a new issue:
Termination reason: Error.
Traceback (most recent call last):
File "/usr/lib/python2.7/runpy.py", line 174, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/usr/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/root/.local/lib/python2.7/site-packages/object_detection/train.py", line 167, in <module>
tf.app.run()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 48, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "/root/.local/lib/python2.7/site-packages/object_detection/train.py", line 163, in main
worker_job_name, is_chief, FLAGS.train_dir)
File "/root/.local/lib/python2.7/site-packages/object_detection/trainer.py", line 264, in train
train_config.prefetch_queue_capacity, data_augmentation_options)
File "/root/.local/lib/python2.7/site-packages/object_detection/trainer.py", line 59, in create_input_queue
tensor_dict = create_tensor_dict_fn()
File "/root/.local/lib/python2.7/site-packages/object_detection/train.py", line 120, in get_next
dataset_builder.build(config)).get_next()
File "/root/.local/lib/python2.7/site-packages/object_detection/builders/dataset_builder.py", line 164, in build
functools.partial(tf.data.TFRecordDataset, buffer_size=8 * 1000 * 1000),
AttributeError: 'module' object has no attribute 'data'
The replica worker 0 exited with a non-zero status of 1.
With no relevant search results for this issue, it's difficult to know what the problem is, though one answer suggested an out-of-date version of TensorFlow. The stated TensorFlow version for this project is TensorFlow 1.2. TensorFlow is now on version 1.7, so maybe that's where the issue arises. The runtime versions offered are 1.2, 1.4, 1.5 and 1.6. Trying it with 1.6, I got a different error:
Termination reason: Error.
Traceback (most recent call last): [...]
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/slim/python/slim/learning.py", line 746, in train
master, start_standard_services=False, config=session_config) as sess:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/supervisor.py", line 1000, in managed_session
self.stop(close_summary_writer=close_summary_writer)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/supervisor.py", line 828, in stop
ignore_live_threads=ignore_live_threads)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/coordinator.py", line 389, in join
six.reraise(*self._exc_info_to_raise)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/supervisor.py", line 989, in managed_session
start_standard_services=start_standard_services)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/supervisor.py", line 734, in prepare_or_wait_for_session
max_wait_secs=max_wait_secs)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/session_manager.py", line 402, in wait_for_session
sess)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/session_manager.py", line 486, in _try_run_local_init_op
sess.run(self._local_init_op)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 905, in run
run_metadata_ptr)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 1137, in _run
feed_dict_tensor, options, run_metadata)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 1355, in _do_run
options, run_metadata)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 1374, in _do_call
raise type(e)(node_def, op, message)
UnavailableError: OS Error
The replica worker 1 exited with a non-zero status of 1.
Again, there doesn't seem to be a solution to this error right now, so I'm stabbing in the dark. I try again with TensorFlow 1.4 and get a new error:
Termination reason: Error.
Traceback (most recent call last):
File "/usr/lib/python2.7/runpy.py", line 174, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/usr/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/root/.local/lib/python2.7/site-packages/object_detection/train.py", line 167, in <module>
tf.app.run()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 48, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "/root/.local/lib/python2.7/site-packages/object_detection/train.py", line 163, in main
worker_job_name, is_chief, FLAGS.train_dir)
File "/root/.local/lib/python2.7/site-packages/object_detection/trainer.py", line 264, in train
train_config.prefetch_queue_capacity, data_augmentation_options)
File "/root/.local/lib/python2.7/site-packages/object_detection/trainer.py", line 59, in create_input_queue
tensor_dict = create_tensor_dict_fn()
File "/root/.local/lib/python2.7/site-packages/object_detection/train.py", line 120, in get_next
dataset_builder.build(config)).get_next()
File "/root/.local/lib/python2.7/site-packages/object_detection/builders/dataset_builder.py", line 165, in build
process_fn, config.input_path[:], input_reader_config)
File "/root/.local/lib/python2.7/site-packages/object_detection/utils/dataset_util.py", line 133, in read_dataset
tf.contrib.data.parallel_interleave(
AttributeError: 'module' object has no attribute 'parallel_interleave'
The replica worker 0 exited with a non-zero status of 1.
I'm finding myself deep within a world of errors now, and don't really know what my next steps should be. I'm simply following the steps of the tutorial, executing the lines of code they say to execute, and receiving these remote errors after 5-10 mins of execution.
Any advice on how to overcome these issues would be appreciated.
You have an installation issue. Uninstall everything, then confirm the uninstall by starting Python and importing each package you removed; you should see an ImportError for every one of them.
Then carefully follow the steps on the installation page, which does list separate installation steps for matplotlib and more.
Some of these errors were expected to happen until the following commit.
Using the repo now and following the instructions there works for me. It only looks like you need to use the --runtime-version 1.7 flag.
If you keep having issues, make sure you are following the installation instructions using sudo.
If not, some people were still saying they also needed to add TensorFlow and Jupyter to setup.py (that was not my case, though).
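For context, the runtime version is set on the training job submission itself. A sketch of where the flag goes, based on the tutorial's submit command from that era (the YOUR_GCS_BUCKET placeholder and the config path come from the tutorial, not from this post):
gcloud ml-engine jobs submit training `whoami`_object_detection_`date +%s` \
    --runtime-version 1.7 \
    --job-dir=gs://${YOUR_GCS_BUCKET}/train \
    --packages dist/object_detection-0.1.tar.gz,slim/dist/slim-0.1.tar.gz \
    --module-name object_detection.train \
    --region us-central1 \
    --config object_detection/samples/cloud/cloud.yml \
    -- \
    --train_dir=gs://${YOUR_GCS_BUCKET}/train \
    --pipeline_config_path=gs://${YOUR_GCS_BUCKET}/data/faster_rcnn_resnet101_pets.config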

Pyinstaller Not Compiling Large Program in Python2.7 64-Bit Ubuntu

I have been having some difficulties compiling my Python program using PyInstaller. My program has multiple modules and about 2000 lines of code. It uses Taurus SCADA as a sort of wrapper for PyQt, NumPy, and a few other modules. The platform is Python 2.7 on Ubuntu 15.04, and I'm using PyInstaller 3.1. I was successful in compiling a small "Hello World" type application, so I know the setup works. I am not married to PyInstaller, as I have tried cx_Freeze as well without success. Rather than posting my entire code here, I'll start with the traceback and error in the hope that someone can shed some light on this:
66874 INFO: Processing hook hook-gi.repository.GdkPixbuf.py
Traceback (most recent call last):
File "/usr/local/bin/pyinstaller", line 11, in <module>
sys.exit(run())
File "/usr/local/lib/python2.7/dist-packages/PyInstaller/__main__.py", line 90, in run
run_build(pyi_config, spec_file, **vars(args))
File "/usr/local/lib/python2.7/dist-packages/PyInstaller/__main__.py", line 46, in run_build
PyInstaller.building.build_main.main(pyi_config, spec_file, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/PyInstaller/building/build_main.py", line 755, in main
build(specfile, kw.get('distpath'), kw.get('workpath'), kw.get('clean_build'))
File "/usr/local/lib/python2.7/dist-packages/PyInstaller/building/build_main.py", line 701, in build
exec(text, spec_namespace)
File "<string>", line 16, in <module>
File "/usr/local/lib/python2.7/dist-packages/PyInstaller/building/build_main.py", line 212, in __init__
self.__postinit__()
File "/usr/local/lib/python2.7/dist-packages/PyInstaller/building/datastruct.py", line 183, in __postinit__
self.assemble()
File "/usr/local/lib/python2.7/dist-packages/PyInstaller/building/build_main.py", line 432, in assemble
imphook_object = ImportHook(imported_name, hook_file)
File "/usr/local/lib/python2.7/dist-packages/PyInstaller/building/imphook.py", line 182, in __init__
self._module = importlib_load_source(hook_modname, self._filename)
File "/usr/local/lib/python2.7/dist-packages/PyInstaller/hooks/hook-gi.repository.GdkPixbuf.py", line 36, in <module>
cachedata = subprocess.check_output('gdk-pixbuf-query-loaders')
File "/usr/lib/python2.7/subprocess.py", line 566, in check_output
process = Popen(stdout=PIPE, *popenargs, **kwargs)
File "/usr/lib/python2.7/subprocess.py", line 710, in __init__
errread, errwrite)
File "/usr/lib/python2.7/subprocess.py", line 1335, in _execute_child
raise child_exception
OSError: [Errno 2] No such file or directory
Any help is much appreciated.
I had a similar error today. You are missing the gdk-pixbuf library, so just run:
sudo apt-get install libgdk-pixbuf2.0-dev
and PyInstaller should run smoothly.
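If you want to verify the fix before re-running PyInstaller, note that the hook in the traceback shells out to the gdk-pixbuf-query-loaders tool, so a quick check (assuming the package installs that tool onto your PATH) is:
which gdk-pixbuf-query-loaders
If that prints a path, the hook's subprocess call should no longer fail with OSError: [Errno 2].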
