I am using CLIMADA, a probabilistic natural catastrophe impact model, for a project on tropical cyclone impacts in Florida. The following piece of code raises an AttributeError.
import os
import numpy as np
import pandas as pd
import xarray as xr
import netCDF4 as nc
import datetime as dt
# import CLIMADA modules:
from climada.util.constants import SYSTEM_DIR, DATA_DIR # loads default directory paths for data
from climada.engine import Impact
from climada.hazard import TCTracks, Centroids, TropCyclone
from climada.entity import IFTropCyclone, ImpactFuncSet
from climada.entity.exposures.litpop import LitPop
from climada.entity.exposures.litpop import exposure_set_admin1
import climada.util.plot as u_plot
from climada.util.scalebar_plot import scale_bar
# Initiate EXPOSURE:
exp = LitPop()
countries_list = ['USA']
state_list = ['Florida']
exp.set_country(countries_list, admin1_calc = True, res_arcsec=300, reference_year=2014)
exp.set_geometry_points()
exp.set_lat_lon()
exp.check()
exp['if_TC'] = 1
exposure_set_admin1(exp,res_arcsec=300)
exp = exp[exp['admin1'] == 'Florida']
# Initiate TC hazard from tracks and exposure
tc_hazard = TropCyclone()
tc_hazard.set_from_tracks(tracks, centroids=cent)  # tracks and cent are created earlier (not shown here)
tc_hazard.check()
# Calculate TC impact from exposure and hazard and create impact and impact function set (IFS)
# define impact function:
if_tc = IFTropCyclone()
if_tc.haz_type = 'TC'
if_tc.id = 1
if_tc.set_emanuel_usa(v_thresh=25.7, v_half=84.7, scale=1)
IFS = ImpactFuncSet()
IFS.append(if_tc)
# compute impact:
impact = Impact()
impact.calc(exp, IFS, tc_hazard, save_mat=True)
Calling the last line of the code yields:
AttributeError: 'GeoDataFrame' object has no attribute 'assign_centroids'
Can anyone solve this error? The full traceback is:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-9-e5655feac3c6> in <module>
1 # compute impact:
2 impact = Impact()
----> 3 impact.calc(exp, IFS, tc_hazard, save_mat=True)
~/Documents/WCR/CLIMADA_develop/climada_python/climada/engine/impact.py in calc(self, exposures, impact_funcs, hazard, save_mat)
153 assign_haz = INDICATOR_CENTR + hazard.tag.haz_type
154 if assign_haz not in exposures:
--> 155 exposures.assign_centroids(hazard)
156 else:
157 LOGGER.info('Exposures matching centroids found in %s', assign_haz)
~/opt/anaconda3/envs/climada_env/lib/python3.7/site-packages/pandas/core/generic.py in __getattr__(self, name)
5065 if self._info_axis._can_hold_identifiers_and_holds_name(name):
5066 return self[name]
-> 5067 return object.__getattribute__(self, name)
5068
5069 def __setattr__(self, name, value):
AttributeError: 'GeoDataFrame' object has no attribute 'assign_centroids'
The error occurs at the line exp = exp[exp['admin1'] == 'Florida']. Subsetting like this converts your Exposures object into a plain GeoDataFrame, so if you ran exp.check() after that line it would already fail.
The solution is to convert it back to an Exposures object. Since all the information is still contained in exp, this is easy:
from climada.entity import Exposures
...
exp = exp[exp['admin1'] == 'Florida']
exp = Exposures(exp)
I think this is an effect of how inherited classes work in Python: operations like boolean indexing rebuild the result using the base class unless the subclass tells pandas otherwise.
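As a rough illustration of that behaviour with plain pandas (the class and column names below are invented for the example, they are not CLIMADA code): a DataFrame subclass that does not override pandas' _constructor loses its type as soon as you subset it.
import pandas as pd

class MyExposures(pd.DataFrame):
    # no _constructor override, so pandas rebuilds results as plain DataFrames
    pass

exp = MyExposures({"admin1": ["Florida", "Texas"], "value": [1.0, 2.0]})
print(type(exp))                              # <class '__main__.MyExposures'>
print(type(exp[exp["admin1"] == "Florida"]))  # <class 'pandas.core.frame.DataFrame'>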
Related
I'm trying to backtest strategies using Backtesting.py. When I run this code I get the error shown below it.
import yfinance as yf
import pandas as pd
import matplotlib.pyplot as plt
import ta
from backtesting import Backtest, Strategy
from backtesting.lib import crossover
from ta.volatility import BollingerBands
from ta.momentum import RSIIndicator
from backtesting.test import GOOG
# get Ethereum data from Yahoo Finance
eth = yf.download("ETH-USD", start ="2018-01-01")
class RsiOscillator(Strategy):
    upper_bound = 70
    lower_bound = 30

    def init(self):
        self.rsi = self.I(ta.momentum.RSIIndicator, self.data.Close, 14)

    def next(self):
        if crossover(self.rsi, self.upper_bound):
            self.position.close()
        elif crossover(self.lower_bound, self.rsi):
            self.buy()

bt = Backtest(GOOG, RsiOscillator, cash=10000, commission=0.002)
stats = bt.run()
print(stats)
RuntimeError: Indicator "RSIIndica…(C,14)" errored with exception: '_Array' object has no attribute 'diff'
I saw this code in a video and it runs perfectly there, so I'm not sure what the issue is.
Try converting self.data.Close to a pandas Series, like this:
pd.Series(self.data.Close)
Backtesting.py hands the indicator its own _Array type, which has no .diff method, so without this conversion you hit the AttributeError.
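For concreteness, a minimal sketch of how init could look with that conversion, reusing the imports already in the question (Strategy, RSIIndicator, pd); the inner helper name rsi is made up:
class RsiOscillator(Strategy):
    upper_bound = 70
    lower_bound = 30

    def init(self):
        def rsi(values):
            # wrap backtesting's _Array in a pandas Series so ta can call .diff() on it
            return RSIIndicator(pd.Series(values), window=14).rsi()
        self.rsi = self.I(rsi, self.data.Close)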
try this:
self.upper_bound = 70
self.lower_bound = 30
Working in Colab, starting to build an implied volatility surface in Python for 0DTE options trades. I imported yfinance, pandas, and numpy successfully, but can't find a simple solution to this error:
AttributeError Traceback (most recent call last)
<ipython-input-31-494e77cf0df4> in <module>
----> 1 options = option_chains("ES")
2
3 puts = [options["optionType"] == "put"]
4
5 # print the expirations
1 frames
/usr/local/lib/python3.7/dist-packages/pandas/core/generic.py in __getattr__(self, name)
5485 ):
5486 return self[name]
-> 5487 return object.__getattribute__(self, name)
5488
5489 def __setattr__(self, name: str, value) -> None:
AttributeError: 'DataFrame' object has no attribute 'expiration'
code:
options = option_chains("SPY")
puts = [options["optionType"] == "put"]
# print the expirations
set(puts.expiration)
# select an expiration to plot
puts_at_expiry = puts[puts["expiration"] == "2022-12-2 23:59:59"]
# filter out low vols
filtered_puts_at_expiry = puts_at_expiry[puts_at_expiry.impliedVolatility >= 0.001]
# set the strike as the index so pandas plots nicely
filtered_puts_at_expiry[["strike", "impliedVolatility"]].set_index("strike").plot(title="Implied Volatility Skew", figsize=(7, 4))
Guessing, I added these two in addition to yfinance:
from pandas_datareader import data as pdr
!pip install yfinance
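For reference, the code above assumes an option_chains helper that returns a single DataFrame with optionType and expiration columns, and the traceback suggests the helper being used does not add those columns. A rough sketch of such a helper on top of yfinance (the helper itself and its column names are assumptions, not the original tutorial's code):
import pandas as pd
import yfinance as yf

def option_chains(ticker_symbol):
    # collect calls and puts for every listed expiration into one DataFrame,
    # tagging each row with its expiration date and option type
    ticker = yf.Ticker(ticker_symbol)
    frames = []
    for expiration in ticker.options:
        chain = ticker.option_chain(expiration)
        frames.append(chain.calls.assign(expiration=expiration, optionType="call"))
        frames.append(chain.puts.assign(expiration=expiration, optionType="put"))
    return pd.concat(frames, ignore_index=True)
Note that the filtering line would then also need to be puts = options[options["optionType"] == "put"] rather than wrapping the comparison in a list.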
I am trying to learn OOP and how to "re-use" existing classes. I decided to practice with time series models from statsmodels. My goal is to import several model classes (ARIMA, SARIMA, and so on), create a class where I specify the time series model, and expose fit and predict methods. Maybe this is a bit trivial, but it suits my learning purposes.
import pandas as pd
import numpy as np
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.arima.model import ARIMA
from abc import ABCMeta
from typing import Any, Dict, Tuple, List
class TimeSeriesModels:
    def __init__(self, model_name: str, model_config: Dict[str, Any]):
        self.model_name = model_name
        self.model_config = model_config
        self.model = None
        ##endog = ???
        self._instantiate_ts_model()

    def _instantiate_ts_model(self):
        available_models = self.available_models()
        available_model_names = [el.lower() for el in available_models.keys()]
        if self.model_name.lower() in available_model_names:
            self.model = available_models[self.model_name](**self.model_config)
        else:
            raise ValueError(f"Model {self.model_name} is not implemented yet.")

    @staticmethod
    def available_models() -> dict:
        return {"ARIMA": ARIMA, "SARIMA": SARIMAX, "ExponentialSmoothing": ExponentialSmoothing}

    def fit(self):
        self.model.fit()

    def predict(self, Y: pd.DataFrame):
        return self.model.predict(Y)
In statsmodels the way to fit the data is a bit different. Currently, I call the methods like this:
import random
randomlist = random.sample(range(10, 100), 80)
Y_train = randomlist[:60]
Y_test = randomlist[60:]
TimeSeriesModels.available_models()
#start the model with parameters
ts = TimeSeriesModels(model_name="ExponentialSmoothing", model_config={'initialization_method': 'estimated'})
ts.fit()
ts.predict(Y_test)
This is the error that I get. I am not sure how I can pass the data in my case: I am not able to use it in the fit function, and I am not sure how to add it to the class. Can someone please help me and explain what is wrong with my code?
Input In [99], in TimeSeriesModels.__init__(self, model_name, model_config)
17 self.model = None
18 ##endog = ???
---> 20 self._instantiate_ts_model()
Input In [99], in TimeSeriesModels._instantiate_ts_model(self)
25 available_model_names = [el.lower() for el in available_models.keys()]
27 if self.model_name.lower() in available_model_names:
---> 28 self.model = available_models[self.model_name](**self.model_config)
29 else:
30 raise ValueError(f"Model {self.model_name} is not implemented yet.")
File ~\AppData\Roaming\Python\Python39\site-packages\pandas\util\_decorators.py:199, in deprecate_kwarg.<locals>._deprecate_kwarg.<locals>.wrapper(*args, **kwargs)
197 else:
198 kwargs[new_arg_name] = new_arg_value
--> 199 return func(*args, **kwargs)
TypeError: __init__() missing 1 required positional argument: 'endog'
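For context on why statsmodels asks for endog: its time series estimators take the data in the constructor, fit() returns a separate results object, and predictions are requested by start/end positions rather than by passing the test data. A minimal sketch outside the wrapper class (the variable names are made up):
import numpy as np
from statsmodels.tsa.holtwinters import ExponentialSmoothing

y_train = np.random.default_rng(0).uniform(10, 100, size=60)

# the data (endog) goes into the constructor together with the other settings
model = ExponentialSmoothing(y_train, initialization_method="estimated")
# fit() returns a results object rather than updating the model in place
results = model.fit()
# forecasts are requested by index positions, not by passing Y_test
predictions = results.predict(start=60, end=79)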
Why do I get an AttributeError when I run this code in Jupyter? I am trying to figure out how to use NeuroKit.
I've tried to look through the modules one by one, but I can't seem to find the error.
import neurokit as nk
import pandas as pd
import numpy as np
import sklearn
df = pd.read_csv("https://raw.githubusercontent.com/neuropsychology/NeuroKit.py/master/examples/Bio/bio_100Hz.csv")
# Process the signals
bio = nk.bio_process(ecg=df["ECG"], rsp=df["RSP"], eda=df["EDA"], add=df["Photosensor"], sampling_rate=1000 )
Output Message:
AttributeError Traceback (most recent call last)
<ipython-input-2-ad0abf8de45e> in <module>
11
12 # Process the signals
---> 13 bio = nk.bio_process(ecg=df["ECG"], rsp=df["RSP"], eda=df["EDA"], add=df["Photosensor"], sampling_rate=1000 )
14 # Plot the processed dataframe, normalizing all variables for viewing purpose
15 nk.z_score(bio["df"]).plot()
~\Anaconda3\lib\site-packages\neurokit\bio\bio_meta.py in bio_process(ecg, rsp, eda, emg, add, sampling_rate, age, sex, position, ecg_filter_type, ecg_filter_band, ecg_filter_frequency, ecg_segmenter, ecg_quality_model, ecg_hrv_features, eda_alpha, eda_gamma, scr_method, scr_treshold, emg_names, emg_envelope_freqs, emg_envelope_lfreq, emg_activation_treshold, emg_activation_n_above, emg_activation_n_below)
123 # ECG & RSP
124 if ecg is not None:
--> 125 ecg = ecg_process(ecg=ecg, rsp=rsp, sampling_rate=sampling_rate, filter_type=ecg_filter_type, filter_band=ecg_filter_band, filter_frequency=ecg_filter_frequency, segmenter=ecg_segmenter, quality_model=ecg_quality_model, hrv_features=ecg_hrv_features, age=age, sex=sex, position=position)
126 processed_bio["ECG"] = ecg["ECG"]
127 if rsp is not None:
~\Anaconda3\lib\site-packages\neurokit\bio\bio_ecg.py in ecg_process(ecg, rsp, sampling_rate, filter_type, filter_band, filter_frequency, segmenter, quality_model, hrv_features, age, sex, position)
117 # ===============
118 if quality_model is not None:
--> 119 quality = ecg_signal_quality(cardiac_cycles=processed_ecg["ECG"]["Cardiac_Cycles"], sampling_rate=sampling_rate, rpeaks=processed_ecg["ECG"]["R_Peaks"], quality_model=quality_model)
120 processed_ecg["ECG"].update(quality)
121 processed_ecg["df"] = pd.concat([processed_ecg["df"], quality["ECG_Signal_Quality"]], axis=1)
~\Anaconda3\lib\site-packages\neurokit\bio\bio_ecg.py in ecg_signal_quality(cardiac_cycles, sampling_rate, rpeaks, quality_model)
355
356 if quality_model == "default":
--> 357 model = sklearn.externals.joblib.load(Path.materials() + 'heartbeat_classification.model')
358 else:
359 model = sklearn.externals.joblib.load(quality_model)
AttributeError: module 'sklearn' has no attribute 'externals'
You could downgrade your scikit-learn version if you don't need the most recent fixes:
pip install scikit-learn==0.20.1
There is an open issue about fixing this problem in a future version:
https://github.com/neuropsychology/NeuroKit.py/issues/101
I'm executing the exact same code as you and ran into the same problem.
I followed the link indicated by Louis MAYAUD, and there they suggest just adding
from sklearn.externals import joblib
That solves everything, and you don't need to downgrade your scikit-learn version.
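In case it helps, this is roughly where that import goes in the original script (assuming a scikit-learn version that still ships sklearn.externals.joblib, i.e. older than 0.23):
# importing the submodule makes sklearn.externals.joblib resolvable when
# NeuroKit later calls sklearn.externals.joblib.load internally
from sklearn.externals import joblib

import neurokit as nk
import pandas as pd

df = pd.read_csv("https://raw.githubusercontent.com/neuropsychology/NeuroKit.py/master/examples/Bio/bio_100Hz.csv")
bio = nk.bio_process(ecg=df["ECG"], rsp=df["RSP"], eda=df["EDA"], add=df["Photosensor"], sampling_rate=1000)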
Happy coding! :)
I get the following output from the unit test below:
[[array([[-1.57079633]])]]
[[array([[0.+1.57079633j]])]]
<module 'numpy' from '/usr/local/lib/python2.7/dist-packages/numpy/__init__.pyc'>
E
======================================================================
ERROR: test_TestWECTrain_BasicEnv_SetupAndStepping (__main__.Test_exp)
----------------------------------------------------------------------
Traceback (most recent call last):
File "Test_exp.py", line 34, in test_TestWECTrain_BasicEnv_SetupAndStepping
expsigmatphase = np.exp(tmp)
AttributeError: exp
----------------------------------------------------------------------
Ran 1 test in 0.001s
FAILED (errors=1)
Here is the unit test
import unittest
import os
import scipy.io as sio
import numpy as np
from pprint import pprint
class Test_exp (unittest.TestCase):
    def test_exp (self):
        data_file = "test_buoysimoptions.mat"
        buoysimoptions = sio.loadmat (data_file)
        t = 0.0
        phase = buoysimoptions['SeaParameters']['phase']
        sigma = buoysimoptions['SeaParameters']['sigma']
        sigmatminusphase = sigma * t - phase; print (sigmatminusphase)
        tmp = -1.0j * sigmatminusphase; print (tmp)
        print (np)
        tmp = np.asarray(tmp)
        expsigmatphase = np.exp(tmp)

if __name__ == '__main__':
    unittest.main()
The input file (2.9kB) can be downloaded here: https://www.dropbox.com/s/psq1gq8xpjivrim/test_buoysimoptions.mat?dl=0
Why do I get the error AttributeError: exp?
Note this is identical to "AttributeError: exp" while using numpy.exp() on an apparently ordinary array, but that question was never answered and does not provide a minimal example like I do.
This is in Python 2.7, In Python 3.5 I get:
[[array([[-1.57079633]])]]
[[array([[0.+1.57079633j]])]]
E
======================================================================
ERROR: test_exp (__main__.Test_exp)
----------------------------------------------------------------------
Traceback (most recent call last):
File "Test_exp.py", line 25, in test_exp
expsigmatphase = np.exp(tmp)
AttributeError: 'numpy.ndarray' object has no attribute 'exp'
----------------------------------------------------------------------
Ran 1 test in 0.002s
FAILED (errors=1)
Edit: some further information on the loaded data
I expected buoysimoptions['SeaParameters']['phase'] to just be a numpy array, but it seems it is not (see below), which ultimately causes the error:
>>> phase = buoysimoptions['SeaParameters']['phase']
>>> phase
array([[array([[1.57079633]])]], dtype=object)
>>> phase = buoysimoptions['SeaParameters']['phase'][0]
>>> phase
array([array([[1.57079633]])], dtype=object)
>>> phase = buoysimoptions['SeaParameters']['phase'][0][0]
>>> phase
array([[1.57079633]])
Do I always need to index [0][0] just to get the actual array? What is the right thing to do here? If I use the last form, the exp error goes away.
It turns out the answer is simple: these loaded variables were themselves originally MATLAB structures, and I was omitting the index when retrieving them. The correct thing to do is the following (note the extra [0,0] when retrieving phase and sigma):
import unittest
import os
import scipy.io as sio
import numpy as np
from pprint import pprint
class Test_exp (unittest.TestCase):
    def test_exp (self):
        data_file = "test_buoysimoptions.mat"
        buoysimoptions = sio.loadmat (data_file)
        t = 0.0
        phase = buoysimoptions['SeaParameters'][0,0]['phase']
        sigma = buoysimoptions['SeaParameters'][0,0]['sigma']
        sigmatminusphase = sigma * t - phase; print (sigmatminusphase)
        tmp = -1.0j * sigmatminusphase; print (tmp)
        print (np)
        tmp = np.asarray(tmp)
        expsigmatphase = np.exp(tmp)

if __name__ == '__main__':
    unittest.main()
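As an aside (untested against this particular .mat file), scipy.io.loadmat can also squeeze the singleton MATLAB struct dimensions for you, so the [0,0] indexing becomes unnecessary:
import scipy.io as sio

# squeeze_me drops the singleton dimensions; struct_as_record=False loads
# MATLAB structs as objects with attribute access instead of record arrays
buoysimoptions = sio.loadmat("test_buoysimoptions.mat", squeeze_me=True, struct_as_record=False)
phase = buoysimoptions['SeaParameters'].phase
sigma = buoysimoptions['SeaParameters'].sigma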