I am following an example on DataCamp that uses a deprecated version of rasa_nlu. The sample code from the DataCamp example looks like this:
# Import necessary modules
from rasa_nlu.converters import load_data
from rasa_nlu.config import RasaNLUConfig
from rasa_nlu.model import Trainer
# Create args dictionary
args = {"pipeline": "spacy_sklearn"}
# Create a configuration and trainer
config = RasaNLUConfig(cmdline_args=args)
trainer = Trainer(config)
# Load the training data
training_data = load_data("./training_data.json")
# Create an interpreter by training the model
interpreter = trainer.train(training_data)
# Test the interpreter
print(interpreter.parse("I'm looking for a Mexican restaurant in the North of town"))
This example imports RasaNLUConfig from rasa_nlu.config to create a config and trainer.
My question is: how do I do something like this with the newer Rasa 1.1x? The code that I wrote looks like this:
from rasa_nlu.training_data import load_data
# The deprecated version used 'from rasa_nlu.config import RasaNLUConfig' instead of 'from rasa_nlu import config'
from rasa_nlu import config
from rasa_nlu.model import Trainer
# Create args dictionary
args = {"pipeline": "spacy_sklearn"}
# Create a configuration and trainer
config = RasaNLUConfig(cmdline_args=args)
trainer = Trainer(config)
# Load training data
training_data = load_data('/content/nluintent.md')
# Create an interpreter by training the model
interpreter = trainer.train(training_data)
print(interpreter.parse('Hi, can you help me?'))
How would I be able to train the model using the new version of Rasa?
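For reference, here is a minimal sketch (untested) of what the equivalent flow could look like with the Rasa 1.x Python API; the config.yml file, the pretrained_embeddings_spacy pipeline, and the ./models output path below are my assumptions, not something taken from the DataCamp example:
# Sketch for Rasa 1.x: rasa_nlu was merged into the `rasa` package, so the
# modules live under rasa.nlu, and the pipeline is read from a YAML config
# file instead of a cmdline_args dict.
from rasa.nlu.training_data import load_data
from rasa.nlu import config
from rasa.nlu.model import Trainer
# config.yml is assumed to contain something like:
#   language: en
#   pipeline: pretrained_embeddings_spacy   # roughly the old "spacy_sklearn"
trainer = Trainer(config.load("config.yml"))
# load_data accepts Markdown as well as JSON training data here
training_data = load_data("/content/nluintent.md")
# Train, persist, and test as before
interpreter = trainer.train(training_data)
model_directory = trainer.persist("./models")
print(interpreter.parse("Hi, can you help me?"))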
Related
I have a model in pickle format. When I try to save the model locally I get this error; the model is trained with PyCaret. Can someone tell me what is wrong?
from asyncore import read
import logging
import pickle
import mlflow
from numpy import save
from setuptools import setup
import wrapper
import os
import sqlite3
import sqlalchemy
import sys
from mlflow.models.signature import infer_signature
from pycaret.classification import save_model
class MlflowModelService:
    def saveModel(self, model, variant, readable_model_id, preprocess_file_path=None):
        print('inside storeModel of model service.......')
        readable_model_id = readable_model_id.replace("/", "__$__")
        model_name = "Original-Model"
        with mlflow.start_run() as active_run:  # mlflow work starts
            active_run = mlflow.active_run()
            # mlflow.keras.save_model(model, model_name)  # Save a scikit-learn model to a path on the local file system
            # print(model, model_name)
            # exp_clf101 = setup(data=dataset, target='result', use_gpu=False, silent=True)
            save_model(model, model_name)  # ERROR ON THIS LINE
            pyfunc_model_uri = self.logModel(readable_model_id, model_name, preprocess_file_path)
            self.registerModel(pyfunc_model_uri, readable_model_id)
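Without the actual traceback it is hard to say, but one thing worth checking is whether save_model works at all outside the mlflow run. A minimal standalone sketch, assuming PyCaret 2.x and its bundled 'diabetes' demo dataset (neither of which is taken from the question):
# Standalone PyCaret save_model check; if this writes Original-Model.pkl to the
# working directory, the problem is more likely in how `model` reaches
# saveModel (or in the mlflow run context) than in save_model itself.
from pycaret.datasets import get_data
from pycaret.classification import setup, create_model, save_model
data = get_data('diabetes')
setup(data=data, target='Class variable', silent=True)
model = create_model('lr')
save_model(model, 'Original-Model')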
I was trying to run this notebook on Colab:
https://colab.research.google.com/github/https-deeplearning-ai/GANs-Public/blob/master/C1W1_(Colab)_Inputs_to_a_pre_trained_GAN.ipynb
but first I got this:
ValueError: Tensorflow 1 is unsupported in Colab.
Then I upgraded it using this script:
import tensorflow as tf
!tf_upgrade_v2 \
  --intree stylegan/ \
  --inplace
and I commented out these lines:
%tensorflow_version 1.x
tflib.init_tf()
but then I got this one, which I couldn't solve:
AttributeError: Can't get attribute 'Network' on <module 'dnnlib.tflib.network' from '/content/stylegan/dnnlib/tflib/network.py'>
Can somebody help?
# Clone the official StyleGAN repository from GitHub
!git clone https://github.com/NVlabs/stylegan.git
%tensorflow_version 1.x
import os
import pickle
import numpy as np
import PIL.Image
import stylegan
from stylegan import config
from stylegan.dnnlib import tflib
from tensorflow.python.util import module_wrapper
module_wrapper._PER_MODULE_WARNING_LIMIT = 0
# Initialize TensorFlow
tflib.init_tf()
# Go into that cloned directory
path = 'stylegan/'
if "stylegan" not in os.getcwd():
os.chdir(path)
# Load pre-trained network
# url = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # Downloads the pickled model file: karras2019stylegan-ffhq-1024x1024.pkl
url = 'https://bitbucket.org/ezelikman/gans/downloads/karras2019stylegan-ffhq-1024x1024.pkl'
with stylegan.dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f:
    print(f)
    _G, _D, Gs = pickle.load(f)
# Gs.print_layers() # Print network details
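Without knowing what tf_upgrade_v2 did to network.py it is hard to be sure, but a quick sanity check is to import the exact module the unpickler is complaining about and see whether the Network class is still defined in it. This sketch assumes the working directory is /content/stylegan, so that the top-level dnnlib package resolves:
# pickle.load resolves the class as dnnlib.tflib.network.Network, so that
# attribute must still exist after the in-place upgrade. If this import fails
# or prints False, the upgrade script probably broke network.py and re-cloning
# the repository is the simplest recovery.
import dnnlib.tflib.network as network
print(hasattr(network, 'Network'))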
I'm trying to train an NLU model in Colab, and I get this error. I use Rasa 2.6.3:
ValueError: Unknown data format for file /content/drive/MyDrive/my_project/data/nlu/nlu.yml
Here is my code:
# Import modules for training
from rasa_nlu.training_data import load_data
from rasa_nlu.config import RasaNLUModelConfig
from rasa_nlu.model import Trainer
from rasa_nlu import config
# loading the nlu training samples
training_data = load_data("/content/drive/MyDrive/my_project/data/nlu/nlu.yml")
trainer = Trainer(config.load("/content/drive/MyDrive/my_project/config.yml"))
# training the nlu
interpreter = trainer.train(training_data)
model_directory = trainer.persist("/content/drive/MyDrive/my_project/models/", fixed_model_name="current")
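For context, the rasa_nlu package (and its load_data/Trainer API) belongs to the pre-1.0 releases, and YAML training data such as nlu.yml is the Rasa 2.x format, which that old loader does not understand. A hedged sketch of training the NLU model with the Rasa 2.x CLI from a Colab cell instead, reusing the paths from the question:
# Train only the NLU model with the Rasa 2.x CLI (untested sketch; the paths
# are the ones from the question and the fixed model name is just an example).
!rasa train nlu \
    --config /content/drive/MyDrive/my_project/config.yml \
    --nlu /content/drive/MyDrive/my_project/data/nlu/nlu.yml \
    --out /content/drive/MyDrive/my_project/models \
    --fixed-model-name current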
I have trained a CNN using fastai on Kaggle and also on my local machine. After calling learn.fit_one_cycle(1) on Kaggle I get the following table as output:
I executed the exact same code on my local machine (with the Spyder IDE and Python 3.7) and everything works, but I cannot see that output table. How can I display it?
This is the complete code:
from fastai import *
from fastai.vision import *
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
bs = 32
path = 'C:\\DB\\UCMerced_LandUse\\UCMerced_LandUse\\Unfoldered_Images'
pat = r"([^/\d]+)[^/]*$"
fnames = get_image_files(path)
data = ImageDataBunch.from_name_re(path, fnames, pat, ds_tfms=get_transforms(),
                                   size=224, bs=bs, num_workers=0).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet34, metrics=[accuracy])
learn.fit_one_cycle(1)
The problem was that the console in Spyder was set to 'execute in current console', which doesn't seem to be able to display the result table. Setting it to 'execute in an external system terminal' solved the problem.
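If changing the console setting is not an option, a rough alternative (fastai v1, attribute names from memory, so treat this as an assumption rather than a guaranteed API) is to print the recorder's stored values after training:
# Print the per-epoch numbers that the progress table would normally show,
# so they are visible even in a console that cannot render the table.
print(learn.recorder.val_losses)   # validation loss per epoch
print(learn.recorder.metrics)      # metric values (here: accuracy) per epoch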
If I run
from sklearn.datasets import load_breast_cancer
import lightgbm as lgb
breast_cancer = load_breast_cancer()
data = breast_cancer.data
target = breast_cancer.target
params = {
    "task": "convert_model",
    "convert_model_language": "cpp",
    "convert_model": "test.cpp",
}
gbm = lgb.train(params, lgb.Dataset(data, target))
then I was expecting that a file called test.cpp would be created, with the model saved in C++ format.
However, nothing appears in my current directory.
I have read the documentation (https://lightgbm.readthedocs.io/en/latest/Parameters.html#io-parameters), but can't tell what I'm doing wrong.
Here's a real 'for dummies' answer:
Install the CLI version of lightgbm: https://lightgbm.readthedocs.io/en/latest/Installation-Guide.html
Make note of your installation path, and find the executable. For example, for me, this was ~/LightGBM/lightgbm.
Run the following in a Jupyter notebook:
from sklearn.datasets import load_breast_cancer
import pandas as pd
breast_cancer = load_breast_cancer()
data = pd.DataFrame(breast_cancer.data)
target = pd.DataFrame(breast_cancer.target)
pd.concat([target, data], axis=1).to_csv("regression.train", header=False, index=False)
train_conf = """
task = train
objective = binary
metric = auc
data = regression.train
output_model = trained_model.txt
"""
with open("train.conf", "w") as f:
f.write(train_conf)
conf_convert = """
task = convert_model
input_model= trained_model.txt
"""
with open("convert.conf", "w") as f:
f.write(conf_convert)
! ~/LightGBM/lightgbm config=train.conf
! ~/LightGBM/lightgbm config=convert.conf
Your model will be saved in your current directory.
In the doc they say:
Note: can be used only in CLI version
under the convert_model and convert_model_language parameters.
That means that you should probably use the CLI (Command Line Interface) version of LightGBM instead of the Python wrapper to do this.
Link to Quick Start CLI version.