How to fix Raspberry Pi 4 keyboard error? - python

On my Raspberry Pi, when I try to use the keyboard module I get an error like:
Traceback (most recent call last):
File "emergencyHorus.py", line 161, in <module>
detectKeyboard()
File "emergencyHorus.py", line 49, in detectKeyboard
if keyboard.is_pressed('d'):
File "/usr/local/lib/python3.7/dist-packages/keyboard/_init_.py", line 417, in is_pressed
steps = parse_hotkey(hotkey)
File "/usr/local/lib/python3.7/dist-packages/keyboard/_init_.py", line 344, in parse_hotkey
scan_codes = key_to_scan_codes(hotkey)
File "/usr/local/lib/python3.7/dist-packages/keyboard/_init_.py", line 317, in key_to_scan_codes
t = tuple(_collections.OrderedDict((scan_code, True) for scan_code, modifier in _os_keyboard.map_name(normalized)))
File "/usr/local/lib/python3.7/dist-packages/keyboard/_init_.py", line 317, in <genexpr>
t = tuple(_collections.OrderedDict((scan_code, True) for scan_code, modifier in _os_keyboard.map_name(normalized)))
File "/usr/local/lib/python3.7/dist-packages/keyboard/_nixkeyboard.py", line 153, in map_name
if len(parts) > 1 and parts[0] in ('left', 'right'):
KeyboardInterrupt
On my computer the code works great. Help please.
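The traceback ending in KeyboardInterrupt usually means the script was hanging inside the keyboard backend until it was interrupted by hand. On Linux the keyboard package reads raw events from /dev/input, which requires root, so the first thing to try on the Pi is running the script with sudo (e.g. sudo python3 emergencyHorus.py); the Windows and macOS backends don't need elevation, which would explain why the same code works on your computer. A minimal sketch of the polling loop, reusing the detectKeyboard name from the traceback:

import keyboard  # pip install keyboard; the Linux backend needs root (run under sudo)

def detectKeyboard():
    # Poll for the 'd' key. Without root, the Linux backend cannot read
    # the /dev/input event devices and is_pressed() never sees any keys.
    while True:
        if keyboard.is_pressed('d'):
            print("'d' was pressed")
            break

detectKeyboard()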

Related

Python ValueError issue when using CCXT stop_market order for Deribit

OS: Mac,
Programming Language version: Python 3.8.3,
CCXT version: '1.77.71'
Hello, when I execute the code below I keep receiving the ValueError shown underneath. When I test a different order, exchange.create_order("ETH/USD:ETH", "limit", "sell", order_size, 3650), it works without a problem. It seems to be something I am doing with the stop-market order specifically. I've spent about five hours searching, so I could really use some help. The exchange is Deribit.
S_order = exchange.create_order("ETH/USD:ETH", "stop_market","sell", order_size, None, {"trigger_price": 3470, "trigger": "last_price"})
Traceback (most recent call last):
File "/Users/al/Desktop/Visual Studio/Test/RH_boty.py", line 142, in <module>
schedule.run_pending()
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/schedule/__init__.py", line 780, in run_pending
default_scheduler.run_pending()
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/schedule/__init__.py", line 100, in run_pending
self._run_job(job)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/schedule/__init__.py", line 172, in _run_job
ret = job.run()
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/schedule/__init__.py", line 661, in run
ret = self.job_func()
File "/Users/al/Desktop/Visual Studio/Test/RH_boty.py", line 135, in run_bot
check_buy_sell_signals(reversal_hunter_data)
File "/Users/al/Desktop/Visual Studio/Test/RH_boty.py", line 98, in check_buy_sell_signals
S_order = exchange.create_order("ETH/USD:ETH", "stop_market", "sell", order_size, None, {"trigger_price": 3470, "type": "stop_market", "trigger": "last_price"})
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/ccxt/deribit.py", line 1359, in create_order
return self.parse_order(order, market)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/ccxt/deribit.py", line 1201, in parse_order
return self.safe_order({
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/ccxt/base/exchange.py", line 2564, in safe_order
price = self.omit_zero(self.safe_string(order, 'price'))
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/ccxt/base/exchange.py", line 2732, in omit_zero
if float(string_number) == 0:
ValueError: could not convert string to float: 'market_price'
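The ValueError is raised while ccxt parses Deribit's response, not while placing the order: for a stop-market order Deribit reports the literal string 'market_price' in the price field, and omit_zero() then calls float() on it. Upgrading ccxt is worth trying first, since the problem sits inside deribit.py's parsing. As a hedged stopgap (assuming an authenticated exchange object and a defined order_size, as in the question), you can catch the error and confirm the order separately:

try:
    S_order = exchange.create_order(
        "ETH/USD:ETH", "stop_market", "sell", order_size, None,
        {"trigger_price": 3470, "trigger": "last_price"},
    )
except ValueError:
    # The request already reached Deribit; only parse_order() choked on
    # price == 'market_price'. Verify the stop order exists instead.
    S_order = None
    print(exchange.fetch_open_orders("ETH/USD:ETH"))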

Error executing FMU model with pyFMI: "pyfmi.fmi.FMUException: Failed to get the Boolean values"

I am using the code below to simulate a model.
import numpy as np
from pyfmi import load_fmu

def run_demo(with_plots=True):
    traj = np.array([[start_time, 2.25]])  # start_time is defined elsewhere in the script
    input_object = ('input_1[1]', traj)
    model = load_fmu('pyfmimodel.fmu', log_level=7)  # the stray '[' in the original path was a typo
    opts = model.simulate_options()
    opts['ncp'] = 266
    # Simulate
    res = model.simulate(options=opts, input=input_object, final_time=stop_time)
This is the error I am getting; I need help resolving it.
Traceback (most recent call last):
File "D:\Projects\Python\DOCKER\model_2.py", line 55, in <module>
run_demo()
File "D:\Projects\Python\DOCKER\model_2.py", line 38, in run_demo
res = model.simulate(options=opts, input=input_object,final_time=stop_time )
File "src\pyfmi\fmi.pyx", line 7519, in pyfmi.fmi.FMUModelCS2.simulate
File "src\pyfmi\fmi.pyx", line 378, in pyfmi.fmi.ModelBase._exec_simulate_algorithm
File "src\pyfmi\fmi.pyx", line 372, in pyfmi.fmi.ModelBase._exec_simulate_algorithm
File "C:\Users\tcto5k\Miniconda3\lib\site-packages\pyfmi\fmi_algorithm_drivers.py", line 984, in __init__
self.result_handler.simulation_start()
File "C:\Users\tcto5k\Miniconda3\lib\site-packages\pyfmi\common\io.py", line 2553, in simulation_start
[parameter_data, sorted_vars_real_vref, sorted_vars_int_vref, sorted_vars_bool_vref] = fmi_util.prepare_data_info(data_info, sorted_vars,
File "src\pyfmi\fmi_util.pyx", line 257, in pyfmi.fmi_util.prepare_data_info
File "src\pyfmi\fmi_util.pyx", line 337, in pyfmi.fmi_util.prepare_data_info
File "src\pyfmi\fmi.pyx", line 4377, in pyfmi.fmi.FMUModelBase2.get_boolean
pyfmi.fmi.FMUException: Failed to get the Boolean values.
This is the FMU model variable definition which accepts 1D array as input:
<ScalarVariable name="input_1[1]" valueReference="0" description="u" causality="input" variability="continuous">
<Real start="2.0"/>
</ScalarVariable>
<!-- 2 -->
<ScalarVariable name="dense_3[1]" valueReference="614" description="y (1st order)" causality="output" variability="continuous" initial="calculated">
<Real/>
</ScalarVariable>
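One thing worth trying, as a hedged workaround rather than a confirmed fix: the traceback shows the failure inside the result handler while it prepares to store Boolean variables, so restricting which variables get stored can sidestep that code path. pyFMI's simulate options expose a 'filter' key for this (if your version supports it); the variable names below are the ones from the modelDescription snippet above, and input_object and stop_time are as defined in the question's code:

from pyfmi import load_fmu

model = load_fmu('pyfmimodel.fmu', log_level=7)
opts = model.simulate_options()
opts['ncp'] = 266
# Store results only for the known input and output variables, so the
# result handler never has to read the model's Boolean variables.
opts['filter'] = ['input_1[1]', 'dense_3[1]']
res = model.simulate(options=opts, input=input_object, final_time=stop_time)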

xlwings recently stopped getting live data from Excel via Range

I had been running a script to get data from Excel for over a year using the xlwings Range command, like so:
list=Range('A1:D10').value
Suddenly, it stopped working. I had changed nothing in the code or the system, other than perhaps installing another network card.
This is the error when trying to use the Range assignment now.
Traceback (most recent call last):
File "G:\python32\fetcher.py", line 61, in <module>
listFull = getComData()
File "G:\python32\fetcher.py", line 38, in getComData
listFull=Range('A4:H184').value
File "G:\python32\lib\site-packages\xlwings\main.py", line 1490, in __init__
impl = apps.active.range(cell1).impl
File "G:\python32\lib\site-packages\xlwings\main.py", line 439, in range
return Range(impl=self.impl.range(cell1, cell2))
File "G:\python32\lib\site-packages\xlwings\_xlwindows.py", line 457, in range
xl1 = self.xl.Range(arg1)
File "G:\python32\lib\site-packages\xlwings\_xlwindows.py", line 341, in xl
self._xl = get_xl_app_from_hwnd(self._hwnd)
File "G:\python32\lib\site-packages\xlwings\_xlwindows.py", line 251, in get_xl_app_from_hwnd
disp = COMRetryObjectWrapper(Dispatch(p))
File "G:\python32\lib\site-packages\win32com\client\__init__.py", line 96, in Dispatch
return __WrapDispatch(dispatch, userName, resultCLSID, typeinfo, clsctx=clsctx)
File "G:\python32\lib\site-packages\win32com\client\__init__.py", line 37, in __WrapDispatch
klass = gencache.GetClassForCLSID(resultCLSID)
File "G:\python32\lib\site-packages\win32com\client\gencache.py", line 180, in GetClassForCLSID
mod = GetModuleForCLSID(clsid)
File "G:\python32\lib\site-packages\win32com\client\gencache.py", line 223, in GetModuleForCLSID
mod = GetModuleForTypelib(typelibCLSID, lcid, major, minor)
File "G:\python32\lib\site-packages\win32com\client\gencache.py", line 259, in GetModuleForTypelib
mod = _GetModule(modName)
File "G:\python32\lib\site-packages\win32com\client\gencache.py", line 622, in _GetModule
mod = __import__(mod_name)
ValueError: source code string cannot contain null bytes
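The failing frame is win32com's gencache, not xlwings itself: a generated COM wrapper module in the gen_py cache has been corrupted with null bytes (a crash or power loss while Windows was writing it can do that, which fits "nothing changed in the code"). The usual remedy is to delete the cache and let win32com regenerate it on the next Dispatch call. A minimal sketch:

import shutil
import win32com

# win32com keeps its generated COM wrappers under __gen_path__ (a gen_py
# folder, typically in %LOCALAPPDATA%\Temp or site-packages). Deleting it is
# safe: the wrappers are regenerated automatically on next use.
print("clearing COM wrapper cache at:", win32com.__gen_path__)
shutil.rmtree(win32com.__gen_path__, ignore_errors=True)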

Python, multiprocessing and memory: how does it work?

I'm working "for fun" on a dash app that compares execution time of different algorithms.
Here, I want to compare list sorting algorithms, and in this particular case, I'll be talking about the recursive merging algo.
The app works as follows: generate a set of random lists with chosen lengths, chose an algo, run the tests. Using concurrent.futures.ProcessPoolExecutor, all lists are sorted at the same time in a different process. I've tried with up to 150 lists and it works fine.
Then, I wanted the merging algo to use 2 processes if the list is bigger than 1024 element, which is also working fine... until I have more than 4 lists. And if I run the test with like 100 lists of length under 1024 and add only one list of a length that will make it use 2 processes to be sorted, it won't work and will return a memory error.
Also, I know about the multiprocessing.cpu_count() function but that doesn't seem to be of any use here? It says I have 12 cpu, but will run 150 processes at the same time and crash for 5 * 2 sub-processes.
So, could anyone explain this to me? I have 32Go of ram and we're talking about 5 lists of 2000 int so...
Edit: Adding the code I used
Each generated list is stored as a Data object:
class Data:
    def __init__(self, data):
        self.datas = data
        self.sorted_datas = None
        self.insert_sort_time = None
        self.merge_sort_time = None
        self.mt_merge_sort_time = None
        self.heapify_sort_time = None

    def __lt__(self, other):
        return len(self.datas) < len(other.datas)

    def __repr__(self):
        return str(self.datas)

    def _sort_by_merging(self):
        a = time.time()
        self.sorted_datas = [item for item in self.datas]
        self.sorted_datas = mergeSort(self.sorted_datas)
        b = time.time() - a
        return b

    def _sort_by_multiprocMerging(self):
        a = time.time()
        self.sorted_datas = [item for item in self.datas]
        self.sorted_datas = multiprocMerging(self.sorted_datas)
        b = time.time() - a
        return b
...
And all the Data objects are stored in a DataSet object:
class DataSet:
    def __init__(self):
        self.raw_datas = []
        self._datas = []

    def add(self, new_data):
        heapq.heappush(self.raw_datas, new_data)

    def sort(self):
        self._datas = [heapq.heappop(self.raw_datas) for _ in range(len(self.raw_datas))]
        self.raw_datas = self._datas

    def run_tests(self, *algos):
        if 'merge' in algos:
            self.merge_sort_time = 0
            self.merge_datas = []
            with concurrent.futures.ProcessPoolExecutor() as executor:
                results = [executor.submit(datas._sort_by_merging) for datas in self.raw_datas]
                i = 0
                for result in concurrent.futures.as_completed(results):
                    self.merge_datas.append((len(self.raw_datas[i].datas), result.result()))
                    self.raw_datas[i].merge_sort_time = self.merge_datas[i][1] * 1000
                    self.merge_sort_time += self.raw_datas[i].merge_sort_time
                    i += 1
        if 'mp_merge' in algos:
            self.mt_merge_sort_time = 0
            self.mt_merge_datas = []
            with concurrent.futures.ProcessPoolExecutor() as executor:
                results = [executor.submit(datas._sort_by_multiprocMerging) for datas in self.raw_datas]
                i = 0
                for result in concurrent.futures.as_completed(results):
                    self.mt_merge_datas.append((len(self.raw_datas[i].datas), result.result()))
                    self.raw_datas[i].mt_merge_sort_time = self.mt_merge_datas[i][1] * 1000
                    self.mt_merge_sort_time += self.raw_datas[i].mt_merge_sort_time
                    i += 1
...
And here are the sorting algos:
def mergeSort(my_list):
    if len(my_list) > 1:
        mid = len(my_list) // 2
        left = my_list[:mid]
        right = my_list[mid:]
        mergeSort(left)
        mergeSort(right)
        i = 0
        j = 0
        k = 0
        while i < len(left) and j < len(right):
            if left[i] < right[j]:
                my_list[k] = left[i]
                i += 1
            else:
                my_list[k] = right[j]
                j += 1
            k += 1
        while i < len(left):
            my_list[k] = left[i]
            i += 1
            k += 1
        while j < len(right):
            my_list[k] = right[j]
            j += 1
            k += 1
    return my_list

def multiprocMerging(my_list):
    if len(my_list) > 1024:
        mid = len(my_list) // 2
        left = my_list[:mid]
        right = my_list[mid:]
        with concurrent.futures.ProcessPoolExecutor() as executor:
            results = executor.map(mergeSort, [left, right])
            sides = [result for result in results]
        i = 0
        j = 0
        k = 0
        while i < len(left) and j < len(right):
            if sides[0][i] < sides[1][j]:
                my_list[k] = sides[0][i]
                i += 1
            else:
                my_list[k] = sides[1][j]
                j += 1
            k += 1
        while i < len(sides[0]):
            my_list[k] = sides[0][i]
            i += 1
            k += 1
        while j < len(sides[1]):
            my_list[k] = sides[1][j]
            j += 1
            k += 1
    else:
        mergeSort(my_list)
    return my_list
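A hedged reading of the crash log in Edit 2 below: this looks like a process explosion rather than a data-size problem. run_tests submits one task per list to a pool, every task whose list exceeds 1024 elements opens its own nested ProcessPoolExecutor inside its worker, and under Windows' spawn start method each new process re-imports the whole module, numpy included. What runs out is the commit charge backed by the page file, not the 32 GB of RAM, which is why the workers die with MemoryError and "Le fichier de pagination est insuffisant" (the paging file is too small). The usual mitigation is one bounded pool and no nesting; a minimal sketch (run_all is a hypothetical helper, mergeSort is the function above):

import os
import concurrent.futures

def run_all(lists):
    # One pool for the whole test run, bounded by the core count, instead of
    # a pool per test plus a nested pool inside each worker process.
    with concurrent.futures.ProcessPoolExecutor(max_workers=os.cpu_count()) as executor:
        return list(executor.map(mergeSort, lists))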
Edit 2: here is the console log when the app crashes:
(The console output from the many SpawnProcess workers is heavily interleaved; untangled, it repeats a handful of distinct failures.)
Several workers die with a MemoryError while re-importing the script's modules at spawn:
Traceback (most recent call last):
File "D:\Python\lib\multiprocessing\process.py", line 315, in _bootstrap
self.run()
File "D:\Python\lib\multiprocessing\process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "D:\Python\lib\concurrent\futures\process.py", line 233, in _process_worker
call_item = call_queue.get(block=True)
File "D:\Python\lib\multiprocessing\queues.py", line 116, in get
return _ForkingPickler.loads(res)
File "D:\Projets\Python\Algorithmic\AlgoWebSite\DashApps\algos\Dunod\list_sorting.py", line 2, in <module>
import numpy as np
File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\numpy\__init__.py", line 140, in <module>
from . import core
File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\numpy\core\__init__.py", line 72, in <module>
from . import numeric
File "<frozen importlib._bootstrap>", line 991, in _find_and_load
File "<frozen importlib._bootstrap>", line 975, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 671, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 779, in exec_module
File "<frozen importlib._bootstrap_external>", line 911, in get_code
File "<frozen importlib._bootstrap_external>", line 580, in _compile_bytecode
MemoryError
Others fail while the freshly spawned interpreter imports multiprocessing itself:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "D:\Python\lib\multiprocessing\spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "D:\Python\lib\multiprocessing\spawn.py", line 126, in _main
self = reduction.pickle.load(from_parent)
File "D:\Python\lib\concurrent\futures\process.py", line 54, in <module>
import multiprocessing.connection
File "D:\Python\lib\multiprocessing\connection.py", line 21, in <module>
import _multiprocessing
ImportError: DLL load failed while importing _multiprocessing: Le fichier de pagination est insuffisant pour terminer cette opération.
(English: "The paging file is too small for this operation to complete.")
Still others hit a MemoryError while spawn recompiles the main script (code = compile(f.read(), fname, 'exec') in runpy._get_code_from_file), and one worker dies inside sre_compile with IndexError: bytearray index out of range.
Internal Server Error: /django_plotly_dash/app/lists/_dash-update-component
concurrent.futures.process._RemoteTraceback:
"""
Traceback (most recent call last):
File "D:\Python\lib\concurrent\futures\process.py", line 239, in _process_worker
r = call_item.fn(*call_item.args, **call_item.kwargs)
File "D:\Projets\Python\Algorithmic\AlgoWebSite\DashApps\toolbox\list_datas.py", line 47, in _sort_by_threadmerging
self.sorted_datas = multiThreadMerging(self.sorted_datas)
File "D:\Projets\Python\Algorithmic\AlgoWebSite\DashApps\algos\Dunod\list_sorting.py", line 103, in multiThreadMerging
sides = [result for result in results]
File "D:\Projets\Python\Algorithmic\AlgoWebSite\DashApps\algos\Dunod\list_sorting.py", line 103, in <listcomp>
sides = [result for result in results]
File "D:\Python\lib\concurrent\futures\process.py", line 484, in _chain_from_iterable_of_lists
for element in iterable:
File "D:\Python\lib\concurrent\futures\_base.py", line 611, in result_iterator
yield fs.pop().result()
File "D:\Python\lib\concurrent\futures\_base.py", line 432, in result
return self.__get_result()
File "D:\Python\lib\concurrent\futures\_base.py", line 388, in __get_result
raise self._exception
concurrent.futures.process.BrokenProcessPool: A process in the process pool was terminated abruptly while the future was running or pending.
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\django\core\handlers\exception.py", line 47, in inner
response = get_response(request)
File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\django\core\handlers\base.py", line 179, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\django\views\decorators\csrf.py", line 54, in wrapped_view
return view_func(*args, **kwargs)
File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\django_plotly_dash\views.py", line 74, in update
return _update(request, ident, stateless, **kwargs)
File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\django_plotly_dash\views.py", line 93, in _update
resp = view_func()
File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\django_plotly_dash\dash_wrapper.py", line 560, in dispatch
return self.dispatch_with_args(body, argMap=dict())
File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\django_plotly_dash\dash_wrapper.py", line 647, in dispatch_with_args
res = self.callback_map[target_id]['callback'](*args, **argMap)
File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\dash\dash.py", line 985, in add_context
output_value = func(*args, **kwargs) # %% callback invoked %%
File "D:\Projets\Python\Algorithmic\AlgoWebSite\DashApps\Apps\lists.py", line 382, in mergeTest
data_set.run_tests('mt_merge')
File "D:\Projets\Python\Algorithmic\AlgoWebSite\DashApps\toolbox\list_datas.py", line 104, in run_tests
self.mt_merge_datas.append((len(self.raw_datas[i].datas), result.result()))
File "D:\Python\lib\concurrent\futures\_base.py", line 432, in result
return self.__get_result()
File "D:\Python\lib\concurrent\futures\_base.py", line 388, in __get_result
raise self._exception
concurrent.futures.process.BrokenProcessPool: A process in the process pool was terminated abruptly while the future was running or pending.
HTTP POST /django_plotly_dash/app/lists/_dash-update-component 500 [1.74, 127.0.0.1:63195]

Problem with scapy summary function in Python

I've imported the scapy module in my Python code (an ARP spoofer), and when I use the packet.show() / packet.summary() functions the terminal returns this error:
Error:
Traceback (most recent call last):
File "arp-spoofer.py", line 10, in <module>
print(packet.show())
File "/home/baloo/.local/lib/python3.7/site-packages/scapy/packet.py", line 1261, in show
return self._show_or_dump(dump, indent, lvl, label_lvl)
File "/home/baloo/.local/lib/python3.7/site-packages/scapy/packet.py", line 1235, in _show_or_dump
reprval = f.i2repr(self, fvalue)
File "/home/baloo/.local/lib/python3.7/site-packages/scapy/fields.py", line 376, in i2repr
return fld.i2repr(pkt, val)
File "/home/baloo/.local/lib/python3.7/site-packages/scapy/fields.py", line 502, in i2repr
x = self.i2h(pkt, x)
File "/home/baloo/.local/lib/python3.7/site-packages/scapy/layers/l2.py", line 136, in i2h
iff = self.getif(pkt)
File "/home/baloo/.local/lib/python3.7/site-packages/scapy/layers/l2.py", line 132, in <lambda>
self.getif = (lambda pkt: pkt.route()[0]) if getif is None else getif
File "/home/baloo/.local/lib/python3.7/site-packages/scapy/layers/l2.py", line 400, in route
fld, dst = fld._find_fld_pkt_val(self, dst)
File "/home/baloo/.local/lib/python3.7/site-packages/scapy/fields.py", line 313, in _find_fld_pkt_val
if val == dflts_pkt[self.name] and self.name not in pkt.fields:
File "/home/baloo/.local/lib/python3.7/site-packages/scapy/base_classes.py", line 133, in __eq__
p2, nm2 = self._parse_net(other)
File "/home/baloo/.local/lib/python3.7/site-packages/scapy/base_classes.py", line 99, in _parse_net
tmp = net.split('/') + ["32"]
AttributeError: 'NoneType' object has no attribute 'split'
Code:
import scapy.all as scapy
victim_ip = ""
victim_mac_address = ""
router_ip = ""
packet = scapy.ARP(op=2, pdst=victim_ip, hwdst=victim_mac_address, psrc=router_ip)
print(packet.show())
print(packet.summary())
You need the IPs to be valid.
If you don't want to set them yourself, leave them unspecified and Scapy will fall back to its defaults.
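To make that concrete, a sketch with placeholder values (the addresses are illustrative; use hosts on your own network, and only ones you are authorized to test):

import scapy.all as scapy

# The empty-string addresses in the question are what sent Scapy's route
# lookup a value it could not parse; fill in real ones.
victim_ip = "192.168.1.10"
victim_mac_address = "aa:bb:cc:dd:ee:ff"
router_ip = "192.168.1.1"

packet = scapy.ARP(op=2, pdst=victim_ip, hwdst=victim_mac_address, psrc=router_ip)
packet.show()
print(packet.summary())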
