Python call script still failing

I've spent hours trying to run this script, but it still won't work. SExtractor's `sex` binary is installed globally, so that part of the call is found fine, but something else fails: the file the script is supposed to create is the very thing it can't find. I even added a simple print statement right after the def statement, and it never prints; the call just fails immediately. Any insight?
Code:
import os
import subprocess
import numpy as np

def objmask(inimgs, inwhts, thresh1='20.0', thresh2='2.0', tfdel=True,
            xceng=3001., yceng=3001., outdir='.', tmpdir='tmp'):
    print "c"
    # initial detection of main galaxy with SExtractor for re-centering purposes
    if outdir != '.':
        if not os.path.exists(outdir):
            os.makedirs(outdir)
    print inimgs
    if not os.path.exists(tmpdir):
        os.makedirs(tmpdir)
    for c in range(np.size(inimgs)):
        print 'Creating Aperture Run:', c
        subprocess.call(['sex', inimgs[c], '-c', '/home/vidur/se_files/gccg.sex',
                         '-CATALOG_NAME', '/home/vidur/se_files/tmp'+str(c)+'.cat',
                         '-PARAMETERS_NAME', '/home/vidur/se_files/gccg_ell.param',
                         '-FILTER_NAME', '/home/vidur/se_files/gccg.conv',
                         '-STARNNW_NAME', '/home/vidur/se_files/gccg.nnw',
                         '-CHECKIMAGE_TYPE', 'APERTURES',
                         '-VERBOSE_TYPE', 'QUIET',
                         '-DETECT_THRESH', thresh1,
                         '-ANALYSIS_THRESH', thresh2,
                         '-WEIGHT_IMAGE', inwhts[c]], shell=True
                        )
Error:
---------------------------------------------------------------------------
IOError Traceback (most recent call last)
/home/vidur/se_files/<ipython-input-2-bbc58f9e134a> in <module>()
----> 1 fetch_swarp2.objmask(['sciPHOTOf105w0.fits'],['whtPHOTOf105w0.fits'])
/home/vidur/se_files/fetch_swarp2.pyc in objmask(inimgs, inwhts, thresh1, thresh2, tfdel, xceng, yceng, outdir, tmpdir)
116 secat=asciitable.read('./se_files/_tmp_seobj'+str(c)+'.cat',
117 names=['flux','ferr','xmin','ymin','xmax','ymax',
--> 118 'xc','yc','cxx','cyy','cxy'])
119 robj = np.sqrt((secat['xc']-xceng)**2.0+(secat['yc']-yceng)**2.0)
120 rmin = (robj==np.min(robj))
/usr/local/lib/python2.7/dist-packages/asciitable-0.8.0-py2.7.egg/asciitable/ui.pyc in read(table, numpy, guess, **kwargs)
129 guess = _GUESS
130 if guess:
--> 131 dat = _guess(table, new_kwargs)
132 else:
133 reader = get_reader(**new_kwargs)
/usr/local/lib/python2.7/dist-packages/asciitable-0.8.0-py2.7.egg/asciitable/ui.pyc in _guess(table, read_kwargs)
173 try:
174 reader = get_reader(**guess_kwargs)
--> 175 dat = reader.read(table)
176 # When guessing impose additional requirements on column names and number of cols
177 bads = [" ", ",", "|", "\t", "'", '"']
/usr/local/lib/python2.7/dist-packages/asciitable-0.8.0-py2.7.egg/asciitable/core.pyc in read(self, table)
839 self.header.data = self.data
840
--> 841 self.lines = self.inputter.get_lines(table)
842 self.data.get_data_lines(self.lines)
843 self.header.get_cols(self.lines)
/usr/local/lib/python2.7/dist-packages/asciitable-0.8.0-py2.7.egg/asciitable/core.pyc in get_lines(self, table)
155 table = table.read()
156 elif '\n' not in table and '\r' not in table + '':
--> 157 table = open(table, 'r').read()
158 lines = table.splitlines()
159 except TypeError:
IOError: [Errno 2] No such file or directory: './se_files/_tmp_seobj0.cat'
The most relevant portion of the error seems to be at the end.
P.S. I run Ubuntu 12.04 32-bit
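For what it's worth, a hedged observation rather than a confirmed diagnosis: when subprocess.call is given an argument list together with shell=True, only the first element ('sex' here) is handed to the shell and the remaining items are not passed to SExtractor, so it would run with no configuration and never write the catalog that asciitable later tries to open. A minimal sketch of the call without shell=True, meant to replace the call inside the loop above:
import subprocess

# pass the argument list directly to the 'sex' executable instead of through a shell
subprocess.call(['sex', inimgs[c], '-c', '/home/vidur/se_files/gccg.sex',
                 '-CATALOG_NAME', '/home/vidur/se_files/tmp' + str(c) + '.cat',
                 '-DETECT_THRESH', thresh1,
                 '-ANALYSIS_THRESH', thresh2,
                 '-WEIGHT_IMAGE', inwhts[c]])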


Brightway2: LCA scores & calculations

My problem is about getting the emissions results of my functional unit from an ecoinvent database imported from an Excel spreadsheet.
I managed to get activity/process impacts with ca.annotated_top_processes(lca) or lca.top_activities(), but emissions/biosphere flows can't be displayed except through ca.hinton_matrix(lca, rows=10, cols=10). How can I get the specific scores?
Here's the situation:
import brightway2 as bw
from stats_arrays import *
import bw2analyzer as bwa
bw.projects.set_current("excel_import_verif1")
bw.databases
db = bw.Database('IoTBOLLCA') #Excel spreadsheet
CC = [method for method in bw.methods if "('ReCiPe Midpoint (H) V1.13', 'climate change', 'GWP100')" in str(method)][0]
FU = [i for i in db if 'FU' in i['name']][0]
lca = bw.LCA({FU:1},CC)
lca.lci()
lca.lcia()
lca.score
ca = bwa.ContributionAnalysis()
lca.top_emissions()
and I get this error
TypeError Traceback (most recent call last)
File ~\Anaconda3\envs\bw2\lib\site-packages\scipy\sparse\_sputils.py:208, in isintlike(x)
207 try:
--> 208 operator.index(x)
209 except (TypeError, ValueError):
TypeError: 'numpy.float64' object cannot be interpreted as an integer
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
Input In [28], in <cell line: 1>()
----> 1 lca.top_emissions()
File ~\Anaconda3\envs\bw2\lib\site-packages\bw2calc\lca.py:575, in LCA.top_emissions(self, **kwargs)
573 except ImportError:
574 raise ImportError("`bw2analyzer` is not installed")
--> 575 return ContributionAnalysis().annotated_top_emissions(self, **kwargs)
File ~\Anaconda3\envs\bw2\lib\site-packages\bw2analyzer\contribution.py:152, in ContributionAnalysis.annotated_top_emissions(self, lca, names, **kwargs)
146 """Get list of most damaging biosphere flows in an LCA, sorted by ``abs(direct impact)``.
147
148 Returns a list of tuples: ``(lca score, inventory amount, activity)``. If ``names`` is False, they returns the process key as the last element.
149
150 """
151 ra, rp, rb = lca.reverse_dict()
--> 152 results = [
153 (score, lca.inventory[index, :].sum(), rb[index])
154 for score, index in self.top_emissions(
155 lca.characterized_inventory, **kwargs
156 )
157 ]
158 if names:
159 results = [(x[0], x[1], get_activity(x[2])) for x in results]
File ~\Anaconda3\envs\bw2\lib\site-packages\bw2analyzer\contribution.py:153, in <listcomp>(.0)
146 """Get list of most damaging biosphere flows in an LCA, sorted by ``abs(direct impact)``.
147
148 Returns a list of tuples: ``(lca score, inventory amount, activity)``. If ``names`` is False, they returns the process key as the last element.
149
150 """
151 ra, rp, rb = lca.reverse_dict()
152 results = [
--> 153 (score, lca.inventory[index, :].sum(), rb[index])
154 for score, index in self.top_emissions(
155 lca.characterized_inventory, **kwargs
156 )
157 ]
158 if names:
159 results = [(x[0], x[1], get_activity(x[2])) for x in results]
File ~\Anaconda3\envs\bw2\lib\site-packages\scipy\sparse\_index.py:47, in IndexMixin.__getitem__(self, key)
46 def __getitem__(self, key):
---> 47 row, col = self._validate_indices(key)
49 # Dispatch to specialized methods.
50 if isinstance(row, INT_TYPES):
File ~\Anaconda3\envs\bw2\lib\site-packages\scipy\sparse\_index.py:152, in IndexMixin._validate_indices(self, key)
149 M, N = self.shape
150 row, col = _unpack_index(key)
--> 152 if isintlike(row):
153 row = int(row)
154 if row < -M or row >= M:
File ~\Anaconda3\envs\bw2\lib\site-packages\scipy\sparse\_sputils.py:216, in isintlike(x)
214 if loose_int:
215 msg = "Inexact indices into sparse matrices are not allowed"
--> 216 raise ValueError(msg)
217 return loose_int
218 return True
ValueError: Inexact indices into sparse matrices are not allowed
This is an error as of SciPy 1.9; for now, you can force a downgrade to SciPy 1.8.x.
This has been noted as an issue, but the focus of Brightway development is currently in other areas.
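If it helps, one way to pin the older release until the fix lands (a sketch assuming a pip-managed environment; the conda equivalent works the same way):
pip install "scipy>=1.8,<1.9"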

Dask getting "FileNotFoundError: [Errno 2] No such file or directory" in the middle of a file

I'm making a bag from a plain txt file. It's got a bunch of reviews, delimited by two newlines, but sometimes, and I really can't predict when, it gives me FileNotFoundError: [Errno 2] No such file or directory: '/mnt/c/Workspaces/Books/Dask/foods.txt' while processing it.
Here's the actual code:
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"

import dask.dataframe as dd
from dask.diagnostics import ProgressBar
import numpy as np
import dask.bag as bag
import os

def get_next_part(file, start_index, span_index=0, blocksize=1000):
    file.seek(start_index)
    buffer = file.read(blocksize + span_index).decode('cp1252')
    delimiter_position = buffer.find('\n\n')
    if delimiter_position == -1:
        return get_next_part(file, start_index, span_index + blocksize)
    else:
        file.seek(start_index)
        return start_index, delimiter_position

def get_item(filename, start_index, delimiter_position, encoding='cp1252'):
    with open(filename, 'rb') as file_handle:
        file_handle.seek(start_index)
        text = file_handle.read(delimiter_position).decode(encoding)
        return dict((element.split(': ')[0], element.split(': ')[1])
                    if len(element.split(': ')) > 1
                    else ('unknown', element)
                    for element in text.strip().split('\n'))

with open(f"{os.getcwd()}/foods.txt", 'rb') as file_handle:
    size = file_handle.seek(0, 2) - 1
    more_data = True
    output = []
    current_position = next_position = 0
    while more_data:
        if current_position >= size:
            more_data = False
        else:
            current_position, next_position = get_next_part(file_handle, current_position, 0)
            output.append((current_position, next_position))
            current_position = current_position + next_position + 2

with ProgressBar():
    reviews = (bag.from_sequence(output, npartitions=104)
               .map(lambda x: get_item(f"{os.getcwd()}/foods.txt",
                                       x[0],
                                       x[1]))
               .compute())
Sometimes it works fine, but other times it gives me something along these lines (different percentage every time):
[########## ] | 26% Completed | 54.3s
---------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
<ipython-input-1-90a316620d10> in <module>()
42 with ProgressBar():
43 reviews = (bag.from_sequence(output, npartitions=104)
---> 44 .map(lambda x: get_item(f"{os.getcwd()}/foods.txt",
45 x[0],
46 x[1]))
~/anaconda3/envs/py36/lib/python3.6/site-packages/dask/base.py in compute(self, **kwargs)
154 dask.base.compute
155 """
--> 156 (result,) = compute(self, traverse=False, **kwargs)
157 return result
158
~/anaconda3/envs/py36/lib/python3.6/site-packages/dask/base.py in compute(*args, **kwargs)
396 keys = [x.__dask_keys__() for x in collections]
397 postcomputes = [x.__dask_postcompute__() for x in collections]
--> 398 results = schedule(dsk, keys, **kwargs)
399 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
400
~/anaconda3/envs/py36/lib/python3.6/site-packages/dask/multiprocessing.py in get(dsk, keys, num_workers, func_loads, func_dumps, optimize_graph, pool, **kwargs)
190 get_id=_process_get_id, dumps=dumps, loads=loads,
191 pack_exception=pack_exception,
--> 192 raise_exception=reraise, **kwargs)
193 finally:
194 if cleanup:
~/anaconda3/envs/py36/lib/python3.6/site-packages/dask/local.py in get_async(apply_async, num_workers, dsk, result, cache, get_id, rerun_exceptions_locally, pack_exception, raise_exception, callbacks, dumps, loads, **kwargs)
460 _execute_task(task, data) # Re-execute locally
461 else:
--> 462 raise_exception(exc, tb)
463 res, worker_id = loads(res_info)
464 state['cache'][key] = res
~/anaconda3/envs/py36/lib/python3.6/site-packages/dask/compatibility.py in reraise(exc, tb)
109 def reraise(exc, tb=None):
110 if exc.__traceback__ is not tb:
--> 111 raise exc.with_traceback(tb)
112 raise exc
113
~/anaconda3/envs/py36/lib/python3.6/site-packages/dask/local.py in execute_task()
228 try:
229 task, data = loads(task_info)
--> 230 result = _execute_task(task, data)
231 id = get_id()
232 result = dumps((result, id))
~/anaconda3/envs/py36/lib/python3.6/site-packages/dask/core.py in _execute_task()
117 func, args = arg[0], arg[1:]
118 args2 = [_execute_task(a, cache) for a in args]
--> 119 return func(*args2)
120 elif not ishashable(arg):
121 return arg
~/anaconda3/envs/py36/lib/python3.6/site-packages/dask/bag/core.py in reify()
1589 def reify(seq):
1590 if isinstance(seq, Iterator):
-> 1591 seq = list(seq)
1592 if seq and isinstance(seq[0], Iterator):
1593 seq = list(map(list, seq))
~/anaconda3/envs/py36/lib/python3.6/site-packages/dask/bag/core.py in map_chunk()
1749 else:
1750 for a in zip(*args):
-> 1751 yield f(*a)
1752
1753 # Check that all iterators are fully exhausted
<ipython-input-1-90a316620d10> in <lambda>()
44 .map(lambda x: get_item(f"{os.getcwd()}/foods.txt",
45 x[0],
---> 46 x[1]))
47 .compute())
<ipython-input-1-90a316620d10> in get_item()
18
19 def get_item(filename, start_index, delimiter_position, encoding='cp1252'):
---> 20 with open(filename, 'rb') as file_handle:
21 file_handle.seek(start_index)
22 text = file_handle.read(delimiter_position).decode(encoding)
FileNotFoundError: [Errno 2] No such file or directory: '/mnt/c/Workspaces/Books/Dask/foods.txt'
I've tried messing with the partition numbers - leaving it as default (101), or making sure it's a multiple of 4. Doesn't seem to have an effect.
Anyone know what's going on here? It usually works if I run it a second time, but that's still tough to deal with.
I'm using the latest version of Dask, installed with conda; it's all in JupyterLab, and I'm running it from Windows Subsystem for Linux.
Thanks!
I wasn't able to fix my initial read method, but I was able to find another way of doing the parallel read (with native Dask objects, too!).
Sections were delimited with \n\n, and the linedelimiter argument to bag.read_text didn't mean what I thought it meant, but with the approach from "Why `linedelimiter` does not work for bag.read_text?" I was able to figure out a way to get the sections I needed:
(
    bag.read_text(
        f"{os.getcwd()}/foods.txt",
        encoding="cp1252",
        blocksize="10MB",
        linedelimiter="\n\n",
    )
    .map_partitions(lambda x: "".join(x).split("\n\n"))
)
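For completeness, a hedged sketch of how those re-read sections could then be parsed and computed, reusing the key: value splitting idea from get_item in the question (parse_review is an illustrative name, not something from the original answer):
import os
import dask.bag as bag

def parse_review(section):
    # split each "key: value" line; anything without a separator goes under 'unknown'
    items = {}
    for line in section.strip().split('\n'):
        parts = line.split(': ', 1)
        if len(parts) > 1:
            items[parts[0]] = parts[1]
        else:
            items['unknown'] = line
    return items

reviews = (
    bag.read_text(f"{os.getcwd()}/foods.txt", encoding="cp1252",
                  blocksize="10MB", linedelimiter="\n\n")
    .map_partitions(lambda lines: "".join(lines).split("\n\n"))
    .map(parse_review)
    .compute()
)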

Import audio files in Python for analysis, e.g. signal analysis

I am trying to import a .wav file to perform signal analysis on it. I have imported all the IPython and wave libraries that I am meant to import, but it still shows me an error. Some of the libraries came from a book downloaded from a GitHub repository (https://github.com/AllenDowney/ThinkDSP). Can anyone show me what is wrong with the code?
(This is after importing all the necessary libraries from the book and in Python.)
Error Traceback (most recent call last)
in ()
----> 1 wave= thinkdsp.read_wave('365515__noedell__noedell-shady-scheme-01.wav')
C:\Users\Ademola\Desktop\500 Level\DSP\DSP_Python\ThinkDSP-master\ThinkDSP-master\code\thinkdsp.py in read_wave(filename)
99 returns: Wave
100 """
--> 101 fp = open_wave(filename, 'r')
102
103 nchannels = fp.getnchannels()
C:\Users\Ademola\Anaconda3\lib\wave.py in open(f, mode)
497 mode = 'rb'
498 if mode in ('r', 'rb'):
--> 499 return Wave_read(f)
500 elif mode in ('w', 'wb'):
501 return Wave_write(f)
C:\Users\Ademola\Anaconda3\lib\wave.py in init(self, f)
161 # else, assume it is an open file object already
162 try:
--> 163 self.initfp(f)
164 except:
165 if self._i_opened_the_file:
C:\Users\Ademola\Anaconda3\lib\wave.py in initfp(self, file)
141 chunkname = chunk.getname()
142 if chunkname == b'fmt ':
--> 143 self._read_fmt_chunk(chunk)
144 self._fmt_chunk_read = 1
145 elif chunkname == b'data':
C:\Users\Ademola\Anaconda3\lib\wave.py in _read_fmt_chunk(self, chunk)
258 self._sampwidth = (sampwidth + 7) // 8
259 else:
--> 260 raise Error('unknown format: %r' % (wFormatTag,))
261 self._framesize = self._nchannels * self._sampwidth
262 self._comptype = 'NONE'
Error: unknown format: 3
Without seeing your code it's hard to answer your question... you can read wav files with the wave module that comes standard in Python. Basic syntax below:
import wave
wav = wave.open('wavFile.wav', 'r')
Here is the documentation:
https://docs.python.org/2/library/wave.html
Let me know if this helps!
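One more observation, offered as a guess rather than a confirmed fix: "unknown format: 3" usually means the file stores IEEE-float samples (WAV format tag 3), which the standard-library wave module refuses to read. If that is the case here, scipy.io.wavfile can handle float WAVs; a minimal sketch, assuming SciPy is installed and using the filename from the question:
from scipy.io import wavfile

# returns the sample rate and a NumPy array of samples (float32 for format-3 files)
framerate, samples = wavfile.read('365515__noedell__noedell-shady-scheme-01.wav')
print(framerate, samples.dtype, samples.shape)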

Why am I getting the following error opening an xlsx?

Hello, I am trying to open a .xlsx file with Python. My code is really simple; it looks as follows:
import openpyxl
wb = openpyxl.load_workbook('prod334.xlsx')
However, I am getting an error. I am not sure about its cause, since I am just opening the file; I would appreciate any suggestion to overcome this failure.
The error that I am getting is the following:
__init__() got an unexpected keyword argument 'vertAlign'
TypeError Traceback (most recent call last)
main.py in <module>()
2
3
----> 4 wb = openpyxl.load_workbook('prod334.xlsx')
/usr/local/lib/anaconda/lib/python2.7/site-packages/openpyxl/reader/excel.pyc in load_workbook(filename, use_iterators, keep_vba, guess_types, data_only)
163
164 try:
--> 165 _load_workbook(wb, archive, filename, use_iterators, keep_vba)
166 except KeyError:
167 e = exc_info()[1]
/usr/local/lib/anaconda/lib/python2.7/site-packages/openpyxl/reader/excel.pyc in _load_workbook(wb, archive, filename, use_iterators, keep_vba)
210 assert wb.loaded_theme == None, "even though the theme information is missing there is a theme object ?"
211
--> 212 style_properties = read_style_table(archive.read(ARC_STYLE))
213 style_table = style_properties.pop('table')
214 wb.shared_styles = style_properties.pop('list')
/usr/local/lib/anaconda/lib/python2.7/site-packages/openpyxl/reader/style.pyc in read_style_table(xml_source)
221 def read_style_table(xml_source):
222 p = SharedStylesParser(xml_source)
--> 223 p.parse()
224 return p.style_prop
/usr/local/lib/anaconda/lib/python2.7/site-packages/openpyxl/reader/style.pyc in parse(self)
37 self.parse_color_index()
38 self.style_prop['color_index'] = self.color_index
---> 39 self.font_list = list(self.parse_fonts())
40 self.fill_list = list(self.parse_fills())
41 self.border_list = list(self.parse_borders())
/usr/local/lib/anaconda/lib/python2.7/site-packages/openpyxl/reader/style.pyc in parse_fonts(self)
89 if fonts is not None:
90 for node in safe_iterator(fonts, '{%s}font' % SHEET_MAIN_NS):
---> 91 yield self.parse_font(node)
92
93 def parse_font(self, font_node):
/usr/local/lib/anaconda/lib/python2.7/site-packages/openpyxl/reader/style.pyc in parse_font(self, font_node)
105 if color is not None:
106 font['color'] = Color(**dict(color.items()))
--> 107 return Font(**font)
108
109 def parse_fills(self):
TypeError: __init__() got an unexpected keyword argument 'vertAlign'
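One thing worth checking, offered as an assumption rather than a confirmed diagnosis: the traceback comes from an old openpyxl bundled with Anaconda's Python 2.7, and its style reader does not know the vertAlign font attribute that this workbook uses; newer openpyxl releases do. A quick version check, with the possible upgrade noted as a comment:
import openpyxl
print(openpyxl.__version__)  # very old releases (1.x / early 2.x) predate vertAlign support
# if the version is old, upgrading may resolve the error (assumption):
#     pip install --upgrade openpyxl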

Using the Sacred module with IPython

I am trying to set up Sacred for Python and I am going through the tutorial. I was able to install it using pip install sacred with no issues, but I am having trouble running the basic code:
from sacred import Experiment
ex = Experiment("hello_world")
Running this code returns a ValueError:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-25-66f549cfb192> in <module>()
1 from sacred import Experiment
2
----> 3 ex = Experiment("hello_world")
/Users/ryandevera/anaconda/lib/python2.7/site-packages/sacred/experiment.pyc in __init__(self, name, ingredients)
42 super(Experiment, self).__init__(path=name,
43 ingredients=ingredients,
---> 44 _caller_globals=caller_globals)
45 self.default_command = ""
46 self.command(print_config, unobserved=True)
/Users/ryandevera/anaconda/lib/python2.7/site-packages/sacred/ingredient.pyc in __init__(self, path, ingredients, _caller_globals)
48 self.doc = _caller_globals.get('__doc__', "")
49 self.sources, self.dependencies = \
---> 50 gather_sources_and_dependencies(_caller_globals)
51
52 # =========================== Decorators ==================================
/Users/ryandevera/anaconda/lib/python2.7/site-packages/sacred/dependencies.pyc in gather_sources_and_dependencies(globs)
204 def gather_sources_and_dependencies(globs):
205 dependencies = set()
--> 206 main = Source.create(globs.get('__file__'))
207 sources = {main}
208 experiment_path = os.path.dirname(main.filename)
/Users/ryandevera/anaconda/lib/python2.7/site-packages/sacred/dependencies.pyc in create(filename)
61 if not filename or not os.path.exists(filename):
62 raise ValueError('invalid filename or file not found "{}"'
---> 63 .format(filename))
64
65 mainfile = get_py_file_if_possible(os.path.abspath(filename))
ValueError: invalid filename or file not found "None"
I am not sure why this error is raised. The documentation does not say anything about setting up an experiment file prior to running the code. Any help would be greatly appreciated!
The traceback indicates that the Experiment constructor searches its caller's namespace to find the file in which it's defined.
Thus, to make the example work, place the example code into a file and run that file directly.
If you are using IPython, you could also try the %%python cell magic, which effectively captures the code you give it and runs it in a separate Python process.
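For reference, a minimal sketch of "place the example code into a file", following the Sacred tutorial (the filename and function body are illustrative):
# hello_world.py
from sacred import Experiment

ex = Experiment("hello_world")

@ex.automain
def my_main():
    print("Hello world!")
Running it with python hello_world.py gives Sacred a real __file__ to record, so the ValueError above does not occur.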
According to the docs, if you're in IPython/Jupyter, you can allow the Experiment to run in a non-reproducible interactive environment:
ex = Experiment('jupyter_ex', interactive=True)
https://sacred.readthedocs.io/en/latest/experiment.html#run-the-experiment
The docs say it nicely (TL;DR: sacred checks this for you and fails in order to warn you)
Warning
By default, Sacred experiments will fail if run in an interactive
environment like a REPL or a Jupyter Notebook. This is an intended
security measure since in these environments reproducibility cannot be
ensured. If needed, this safeguard can be deactivated by passing
interactive=True to the experiment like this:
ex = Experiment('jupyter_ex', interactive=True)
Setting interactive=True doesn't work if you run the notebook as a script through ipython.
$ ipython code.ipynb
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
Cell In[1], line 1
----> 1 ex = Experiment("image_classification", interactive=True)
2 ex.observers.append(NeptuneObserver(run=neptune_run))
File ~\miniconda3\envs\py38\lib\site-packages\sacred\experiment.py:119, in Experiment.__init__(self, name, ingredients, interactive, base_dir, additional_host_info, additional_cli_options, save_git_info)
117 elif name.endswith(".pyc"):
118 name = name[:-4]
--> 119 super().__init__(
120 path=name,
121 ingredients=ingredients,
122 interactive=interactive,
123 base_dir=base_dir,
124 _caller_globals=caller_globals,
125 save_git_info=save_git_info,
126 )
127 self.default_command = None
128 self.command(print_config, unobserved=True)
File ~\miniconda3\envs\py38\lib\site-packages\sacred\ingredient.py:75, in Ingredient.__init__(self, path, ingredients, interactive, _caller_globals, base_dir, save_git_info)
69 self.save_git_info = save_git_info
70 self.doc = _caller_globals.get("__doc__", "")
71 (
72 self.mainfile,
73 self.sources,
74 self.dependencies,
---> 75 ) = gather_sources_and_dependencies(
76 _caller_globals, save_git_info, self.base_dir
77 )
78 if self.mainfile is None and not interactive:
79 raise RuntimeError(
80 "Defining an experiment in interactive mode! "
81 "The sourcecode cannot be stored and the "
82 "experiment won't be reproducible. If you still"
83 " want to run it pass interactive=True"
84 )
File ~\miniconda3\envs\py38\lib\site-packages\sacred\dependencies.py:725, in gather_sources_and_dependencies(globs, save_git_info, base_dir)
723 def gather_sources_and_dependencies(globs, save_git_info, base_dir=None):
724 """Scan the given globals for modules and return them as dependencies."""
--> 725 experiment_path, main = get_main_file(globs, save_git_info)
727 base_dir = base_dir or experiment_path
729 gather_sources = source_discovery_strategies[SETTINGS["DISCOVER_SOURCES"]]
File ~\miniconda3\envs\py38\lib\site-packages\sacred\dependencies.py:596, in get_main_file(globs, save_git_info)
594 main = None
595 else:
--> 596 main = Source.create(globs.get("__file__"), save_git_info)
461 return Source(main_file, get_digest(main_file), repo, commit, is_dirty)
File ~\miniconda3\envs\py38\lib\site-packages\sacred\dependencies.py:382, in get_py_file_if_possible(pyc_name)
380 if pyc_name.endswith((".py", ".so", ".pyd")):
381 return pyc_name
--> 382 assert pyc_name.endswith(".pyc")
383 non_compiled_file = pyc_name[:-1]
384 if os.path.exists(non_compiled_file):
sacred==0.8.2
