Using PyTorch to utilise DBpedia - KeyError: 'content-disposition' - Python

I am trying to download the DBpedia data via the torchtext.datasets module, and it is not working.
Here is the code I have written (taken from https://analyticsindiamag.com/multi-class-text-classification-in-pytorch-using-torchtext/):
import torch
import torchtext
from torchtext.datasets import text_classification
import os
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import time
from torch.utils.data.dataset import random_split
import re
from torchtext.data.utils import ngrams_iterator
from torchtext.data.utils import get_tokenizer
ngrams = 2
batch_size = 16
if not os.path.isdir('./.data'):
    os.mkdir('./.data')

train_dataset, test_dataset = text_classification.DATASETS['DBpedia'](root='./.data', ngrams=ngrams, vocab=None)
It produces the following error:
Traceback (most recent call last):
  File "/Users/aidanpayne/Desktop/Scripts/Python/Neural Networks/text_classification_model.py", line 19, in <module>
    train_dataset, test_dataset = text_classification.DATASETS['DBpedia'](root='./.data', ngrams=ngrams, vocab=None)
  File "/Users/aidanpayne/opt/anaconda3/lib/python3.8/site-packages/torchtext/datasets/text_classification.py", line 237, in DBpedia
    return _setup_datasets(*(("DBpedia",) + args), **kwargs)
  File "/Users/aidanpayne/opt/anaconda3/lib/python3.8/site-packages/torchtext/datasets/text_classification.py", line 117, in _setup_datasets
    dataset_tar = download_from_url(URLS[dataset_name], root=root)
  File "/Users/aidanpayne/opt/anaconda3/lib/python3.8/site-packages/torchtext/utils.py", line 100, in download_from_url
    return _process_response(response, root, filename)
  File "/Users/aidanpayne/opt/anaconda3/lib/python3.8/site-packages/torchtext/utils.py", line 53, in _process_response
    d = r.headers['content-disposition']
  File "/Users/aidanpayne/opt/anaconda3/lib/python3.8/site-packages/requests/structures.py", line 54, in __getitem__
    return self._store[key.lower()][1]
KeyError: 'content-disposition'
If anyone can help, that would be great!
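As the traceback shows, the KeyError happens because the server (a Google Drive link, for these legacy text_classification datasets) answered without a content-disposition header — typically because Drive returned a quota or virus-scan page instead of the archive — so torchtext's download_from_url cannot derive a filename. A minimal sketch of one workaround, under the assumption that you can upgrade to torchtext 0.9 or later, where the dataset API was reworked and no longer goes through this code path (the split argument below belongs to the newer API, not the legacy one used in the question):

from torchtext.datasets import DBpedia  # available in torchtext >= 0.9

# The newer API handles the download itself and returns iterable
# datasets of (label, text) pairs.
train_iter, test_iter = DBpedia(root='./.data', split=('train', 'test'))

label, text = next(iter(train_iter))
print(label, text[:100])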

Related

Convert Detectron2 model to TorchScript

I want to convert the Detectron2 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml' model to TorchScript. My code is given below.
import cv2
import numpy as np
import torch
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.modeling import build_model
from detectron2.export.flatten import TracingAdapter
import os
ModelPath = '/home/jayasanka/working_files/create_torchsript/model.pt'

with open('savepic.npy', 'rb') as f:
    image = np.load(f)

# -------------------------------------------------------------------------------------
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # your number of classes + 1
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, ModelPath)
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.60  # set the testing threshold for this model
predictor = DefaultPredictor(cfg)
I used the TracingAdapter and trace functions, but I don't have much idea of the concept behind them.
# im = cv2.imread(image)
im = torch.tensor(image)

def inference_func(model, image):
    inputs = [{"image": image}]
    return model.inference(inputs, do_postprocess=False)[0]

wrapper = TracingAdapter(predictor, im, inference_func)
wrapper.eval()
traced_script_module = torch.jit.trace(wrapper, (im,))
traced_script_module.save("torchscript.pt")
It gives the error shown below.
Traceback (most recent call last):
  File "script.py", line 49, in <module>
    traced_script_module= torch.jit.trace(wrapper, (im,))
  File "/home/jayasanka/anaconda3/envs/vha/lib/python3.7/site-packages/torch/jit/_trace.py", line 744, in trace
    _module_class,
  File "/home/jayasanka/anaconda3/envs/vha/lib/python3.7/site-packages/torch/jit/_trace.py", line 959, in trace_module
    argument_names,
  File "/home/jayasanka/anaconda3/envs/vha/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
    return forward_call(*input, **kwargs)
  File "/home/jayasanka/anaconda3/envs/vha/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1039, in _slow_forward
    result = self.forward(*input, **kwargs)
  File "/home/jayasanka/anaconda3/envs/vha/lib/python3.7/site-packages/detectron2/export/flatten.py", line 294, in forward
    outputs = self.inference_func(self.model, *inputs_orig_format)
  File "script.py", line 44, in inference_func
    return model.inference(inputs, do_postprocess=False)[0]
  File "/home/jayasanka/anaconda3/envs/vha/lib/python3.7/site-packages/yacs/config.py", line 141, in __getattr__
    raise AttributeError(name)
AttributeError: inference
Can you help me figure this out? Is there any other method to do this easily?
Change it to:
def inference(model, inputs):
    # use do_postprocess=False so it returns ROI mask
    inst = model.inference(inputs, do_postprocess=False)[0]
    return [{"instances": inst}]
and build the wrapper from a tensor input:
# assuming isinstance(image, np.ndarray) is True
image_tensor = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
wrapper = TracingAdapter(predictor, inputs=[{"image": image_tensor}], inference_func=inference)
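Putting it together, a minimal sketch of the tracing step that would follow, based on the pattern in detectron2's deployment examples (flattened_inputs is the attribute TracingAdapter exposes for exactly this purpose; the output filename is arbitrary):

wrapper.eval()
# TracingAdapter flattens the list-of-dicts input into a tuple of
# tensors that torch.jit.trace can consume.
traced = torch.jit.trace(wrapper, wrapper.flattened_inputs)
traced.save("torchscript.pt")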

parameter gather is not used

import pandas as pd
import os
import time
from datetime import datetime

path = "/Users/tommasomasaracchio/Documents/pythonfolder"

def key_stats(gather="Total Deb/Equity (mrg)"):
    statspath = path + '/KeyStats'
    stock_list = [x[0] for x in os.walk(statspath)]
    print(stock_list)

key_stats()
I thought the problem was gather, but that seems not to be the case.
The terminal tells me this, and I don't really understand it:
tommaso desktop % python programma.py
hello world
Traceback (most recent call last):
  File "/Users/tommasomasaracchio/Desktop/programma.py", line 2, in <module>
    import pandas as pd
  File "/Users/tommasomasaracchio/opt/anaconda3/lib/python3.9/site-packages/pandas/__init__.py", line 179, in <module>
    import pandas.testing
  File "/Users/tommasomasaracchio/opt/anaconda3/lib/python3.9/site-packages/pandas/testing.py", line 6, in <module>
    from pandas._testing import (
  File "/Users/tommasomasaracchio/opt/anaconda3/lib/python3.9/site-packages/pandas/_testing/__init__.py", line 58, in <module>
    from pandas._testing._io import ( # noqa:F401
  File "/Users/tommasomasaracchio/opt/anaconda3/lib/python3.9/site-packages/pandas/_testing/_io.py", line 22, in <module>
    from pandas._testing._random import rands
  File "/Users/tommasomasaracchio/opt/anaconda3/lib/python3.9/site-packages/pandas/_testing/_random.py", line 10, in <module>
    RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
AttributeError: module 'string' has no attribute 'ascii_letters'
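No answer was posted here, but the last line of the traceback is the real clue: pandas is evaluating string.ascii_letters, which always exists in the standard-library string module, so Python must be importing some other string — typically a file named string.py sitting next to the script (here, on the Desktop). A quick diagnostic, as a sketch:

import string

# If this prints a path on your Desktop rather than inside the Python
# installation, a local string.py is shadowing the standard library;
# rename or delete it (and any stale string.pyc alongside it).
print(string.__file__)

The gather parameter is indeed unused inside key_stats, but that only earns a linter warning; it is unrelated to this crash.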

Why can't I open a .h5 file in Python?

I am trying to open an .h5 file, but I am experiencing an OSError.
import sys
sys.path.append('..')
from unet3d.training import load_old_model
import tables
from train_model import config

model_file = config["model_file"]    # config["model_file"] = os.path.abspath("mc_seg_model.h5")
hdf5_file = config["val_data_file"]  # config['val_data_file'] = os.path.abspath("../data/val_data.h5")
model = load_old_model(model_file)
The load_old_model function is as follows:
import math
from functools import partial
import pdb

from keras import backend as K
from keras.callbacks import ModelCheckpoint, CSVLogger, LearningRateScheduler, ReduceLROnPlateau, EarlyStopping
from keras.models import load_model
import tensorflow_addons as tfa

def load_old_model(model_file):
    # pdb.set_trace()
    print("Loading pre-trained model")
    custom_objects = {'dice_coefficient_loss': dice_coefficient_loss, 'dice_coefficient': dice_coefficient,
                      'weighted_dice_coefficient': weighted_dice_coefficient,
                      'weighted_dice_coefficient_loss': weighted_dice_coefficient_loss}
    try:
        # from keras_contrib.layers import InstanceNormalization
        from tfa.layers import InstanceNormalization
        custom_objects["InstanceNormalization"] = InstanceNormalization
    except ImportError:
        pass
    try:
        return load_model(model_file, custom_objects=custom_objects)
    except ValueError as error:
        if 'InstanceNormalization' in str(error):
            raise ValueError(str(error) + "\n\nPlease install keras-contrib to use InstanceNormalization:\n"
                                          "'pip install git+https://www.github.com/keras-team/keras-contrib.git'")
        else:
            raise error
When I try to load the model, it throws the following OSError ('Input/output error').
2021-06-16 14:31:38.354199: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1
Traceback (most recent call last):
  File "draft.py", line 35, in <module>
    model = load_old_model(model_file)
  File "../unet3d/training.py", line 50, in load_old_model
    return load_model(model_file, custom_objects=custom_objects)
  File "/share/apps/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/saving/save.py", line 182, in load_model
    return hdf5_format.load_model_from_hdf5(filepath, custom_objects, compile)
  File "/share/apps/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/saving/hdf5_format.py", line 173, in load_model_from_hdf5
    model_config = f.attrs.get('model_config')
  File "/share/apps/anaconda3/lib/python3.7/_collections_abc.py", line 660, in get
    return self[key]
  File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
  File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
  File "/share/apps/anaconda3/lib/python3.7/site-packages/h5py/_hl/attrs.py", line 81, in __getitem__
    attr.read(arr, mtype=htype)
  File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
  File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
  File "h5py/h5a.pyx", line 355, in h5py.h5a.AttrID.read
  File "h5py/_proxy.pyx", line 58, in h5py._proxy.attr_rw
OSError: Unable to read attribute (file read failed: time = Wed Jun 16 14:31:42 2021
, filename = '/data/kfernando/brats20/demo_task3_mcmc/mc_seg_model.h5', file descriptor = 4, errno = 5, error message = 'Input/output error', buf = 0x56126c096440, total read size = 30352, bytes this sub-read = 30352, bytes actually read = 18446744073709551615, offset = 16384)
Can someone please tell me what is causing this error?
Based on your comments about successful h5py open/close, it appears you have a valid HDF5 file. There are two more issues to investigate: 1) problems reading the attribute data, or 2) errors in TensorFlow's load_model() function. I can't help with TF. However, here is a bit of code to recursively descend the data hierarchy and output all attributes and values. See below:
import h5py

def get_all_attrs(name, h5_obj):
    if isinstance(h5_obj, h5py.Group):
        print('\n{} is a Group'.format(name))
    elif isinstance(h5_obj, h5py.Dataset):
        print('\n{} is a Dataset'.format(name))
    print('number of attributes:', len(h5_obj.attrs.keys()))
    for k in h5_obj.attrs.keys():
        print('{} => {}'.format(k, h5_obj.attrs[k]))

with h5py.File(file_path, 'r') as h5r:
    print('number of root level attributes:', len(h5r.attrs.keys()))
    for k in h5r.attrs.keys():
        print('{} => {}'.format(k, h5r.attrs[k]))
    h5r.visititems(get_all_attrs)
Run this with your TF file. It might find an error reading one of the attributes. Example output from my test file looks like this:
number of root level attributes: 2
OS => Windows
User => Me
Base_Group is a Group
number of attributes: 2
Date => today
Time => now
Base_Group/default is a Dataset
number of attributes: 2
attr1 => 1.0
attr2 => 22.2
Group1 is a Group
number of attributes: 0
Group1/default1 is a Dataset
number of attributes: 0
This should help determine the source of the error. If h5py can read the attributes, you need to investigate the TF load_model() function. If you get an error reading the attributes... well, that's your problem, but I don't know how to identify the root cause.
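Since the traceback dies on f.attrs.get('model_config') specifically, a narrower probe is to read just that attribute with h5py — a sketch, reusing the file path from the question's error message:

import h5py

# Keras saves the model architecture as a JSON string in the
# 'model_config' root attribute of the HDF5 file.
with h5py.File('/data/kfernando/brats20/demo_task3_mcmc/mc_seg_model.h5', 'r') as f:
    model_config = f.attrs['model_config']
    print(model_config[:200])

If this raises the same errno 5 I/O error, the problem sits below HDF5 — a truncated file or a flaky filesystem mount — and re-copying the file is the next thing to try.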

Geopandas not working after importing a file from a different directory

I am trying to make a map in Python using shapefiles I have downloaded from bbike.org. Here is my code:
import geopandas as gpd
import os
import sys
import matplotlib.pyplot as plt

bos_files_list = ['buildings.shx', 'landuse.shx', 'natural.shx', 'places.shx', 'points.shx', 'railways.shx', 'roads.shx']
cur_path = os.path.dirname(__file__)

def maps_of_bos(files):
    for x in range(len(files)):
        os.chdir(f'location/of/file')
        f = open(f'{files[x]}', 'r')
        gpd.read_file(f)

z = maps_of_bos(bos_files_list)
z.plot()
plt.show()
However, my error output is as follows:
Traceback (most recent call last):
  File "test.py", line 16, in <module>
    z = maps_of_bos(bos_files_list)
  File "test.py", line 13, in maps_of_bos
    gpd.read_file(f)
  File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/geopandas/io/file.py", line 76, in read_file
    with reader(path_or_bytes, **kwargs) as features:
  File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/contextlib.py", line 113, in __enter__
    return next(self.gen)
  File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/fiona/__init__.py", line 206, in fp_reader
    dataset = memfile.open()
  File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/fiona/io.py", line 63, in open
    return Collection(vsi_path, 'w', crs=crs, driver=driver,
  File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/fiona/collection.py", line 126, in __init__
    raise DriverError("no driver")
fiona.errors.DriverError: no driver
I am relatively new to Python and don't really understand my error. Can someone please help me?
According to the docs, read_file should take the path to the file, not a file object:
gpd.read_file(f'{files[x]}')
You don't need:
f = open(f'{files[x]}', 'r')
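A sketch of the function with both fixes applied — note that maps_of_bos in the question also returns None, so z.plot() would fail even after the read_file change; collecting the GeoDataFrames and returning them is one way around that (the directory below is the question's own placeholder path):

import geopandas as gpd
import matplotlib.pyplot as plt

def maps_of_bos(files):
    frames = []
    for name in files:
        # read_file accepts a path string directly
        frames.append(gpd.read_file(f'location/of/file/{name}'))
    return frames

layers = maps_of_bos(bos_files_list)
ax = layers[0].plot()
for layer in layers[1:]:
    layer.plot(ax=ax)
plt.show()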

Unable to import TensorFlow and Keras properly

I have written this code in Python. I have a dataset in JSON, and I am trying to train on it using Keras. I was able to load the data perfectly, but after writing the complete training code and running it, the program started giving me errors on its imports. I tried reinstalling tensorflowjs and keras with pip, but every time I install tensorflowjs it gives me a compatibility error on the Keras-Applications version. I have tried installing every version, but it still gives the same error. How can I get out of this trouble?
import json
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
import tensorflowjs as tfjs

with open("C:\\Users\\TechProBox\\Desktop\\Model.json") as f:
    data = json.load(f)

x1 = np.array(data['attiude.roll'])
y1 = np.array(data['attitude.pitch'])
z1 = np.array(data['attitude.yaw'])
x2 = np.array(data['gravity.x'])
y2 = np.array(data['gravity.y'])
z2 = np.array(data['gravity.z'])
x3 = np.array(data['rotationRate.x'])
y3 = np.array(data['rotationRate.y'])
z3 = np.array(data['rotationRate.z'])
x4 = np.array(data['userAcceleration.x'])
y4 = np.array(data['userAcceleration.y'])
z4 = np.array(data['userAcceleration.z'])

x1_train = x1[:-10000]
y1_train = y1[:-10000]
z1_train = z1[:-10000]
x2_train = x2[:-10000]
y2_train = y2[:-10000]
z2_train = z2[:-10000]
x3_train = x3[:-10000]
y3_train = y3[:-10000]
z3_train = z3[:-10000]
x4_train = x4[:-10000]
y4_train = y4[:-10000]
z4_train = z4[:-10000]

x1_test = x1[:-10000]
y1_test = y1[:-10000]
z1_test = z1[:-10000]
x2_test = x2[:-10000]
y2_test = y2[:-10000]
z2_test = z2[:-10000]
x3_test = x3[:-10000]
y3_test = y3[:-10000]
z3_test = z3[:-10000]
x4_test = x4[:-10000]
y4_test = y4[:-10000]
z4_test = z4[:-10000]

model = Sequential()
model.add(Dense(64, activation='relu', input_dim=6))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax'))

adam = keras.optimizers.Adam(lr=0.0001)
model.compile(loss='categorical_crossentropy',
              optimizer=adam,
              metrics=['accuracy'])

model.fit(x1_train, y1_train, z1_train, x2_train, y2_train, z2_train, x3_train, y3_train, z3_train,
          x4_train, y4_train, z4_train,
          epochs=14,
          batch_size=128)

score = model.evaluate(x1_test, y2_test, z3_test, x2_test, y2_test, z2_test, x3_test, y3_test, z3_test,
                       x4_test, y4_test, z4_test, batch_size=128)
print(score)

model.save("Keras-64*2-10epoch")
tfjs.converters.save_keras_model(model, "tfjsv3")
Here are the errors:
Using TensorFlow backend.
Traceback (most recent call last):
  File "C:\Users\TechProBox\Desktop\Python1.py", line 3, in <module>
    import keras
  File "C:\Program Files\Python36\lib\site-packages\keras\__init__.py", line 3, in <module>
    from . import utils
  File "C:\Program Files\Python36\lib\site-packages\keras\utils\__init__.py", line 6, in <module>
    from . import conv_utils
  File "C:\Program Files\Python36\lib\site-packages\keras\utils\conv_utils.py", line 9, in <module>
    from .. import backend as K
  File "C:\Program Files\Python36\lib\site-packages\keras\backend\__init__.py", line 89, in <module>
    from .tensorflow_backend import *
  File "C:\Program Files\Python36\lib\site-packages\keras\backend\tensorflow_backend.py", line 5, in <module>
    import tensorflow as tf
  File "C:\Program Files\Python36\lib\site-packages\tensorflow\__init__.py", line 22, in <module>
    from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
  File "C:\Program Files\Python36\lib\site-packages\tensorflow\python\__init__.py", line 52, in <module>
    from tensorflow.core.framework.graph_pb2 import *
  File "C:\Program Files\Python36\lib\site-packages\tensorflow\core\framework\graph_pb2.py", line 6, in <module>
    from google.protobuf import descriptor as _descriptor
  File "C:\Program Files\Python36\lib\site-packages\google\protobuf\descriptor.py", line 47, in <module>
    from google.protobuf.pyext import _message
ImportError: DLL load failed: The specified procedure could not be found.
I just needed to install protobuf and it worked:
pip install protobuf==3.6.0
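If the DLL error persists after pinning, it is worth confirming which protobuf the interpreter actually picks up — a quick diagnostic sketch:

import google.protobuf

# The version and install location should match the pip-installed 3.6.0;
# a mismatch usually means a second Python environment is on the PATH.
print(google.protobuf.__version__)
print(google.protobuf.__file__)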
