My DataFrame:
from random import random, randint
from pandas import DataFrame
t = DataFrame({"metasearch":["A","B","A","B","A","B","A","B"],
"market":["A","B","A","B","A","B","A","B"],
"bid":[random() for i in range(8)],
"clicks": [randint(0,10) for i in range(8)],
"country_code":["A","A","A","A","A","B","A","B"]})
I want to fit a LinearRegression for each market, so I:
1) Group the df: groups = t.groupby(by="market")
2) Prepare a function that fits a model on a group:
from sklearn.linear_model import LinearRegression
def group_fitter(group):
    lr = LinearRegression()
    X = group["bid"].fillna(0).values.reshape(-1, 1)
    y = group["clicks"].fillna(0)
    lr.fit(X, y)
    return lr.coef_[0]  # THIS IS A SCALAR
3) Create a new Series with market as an index and coef as a value:
s = groups.transform(group_fitter)
But the 3rd step fails: KeyError: ('bid_cpc', 'occurred at index bid')
You need apply instead of transform here, because the function works with several of the group's columns together; to attach the result as a new column, use join:
from sklearn.linear_model import LinearRegression
def group_fitter(group):
    lr = LinearRegression()
    X = group["bid"].fillna(0).values.reshape(-1, 1)
    y = group["clicks"].fillna(0)
    lr.fit(X, y)
    return lr.coef_[0]  # THIS IS A SCALAR
groups = t.groupby(by="market")
df = t.join(groups.apply(group_fitter).rename('new'), on='market')
print(df)
bid clicks country_code market metasearch new
0 0.462734 9 A A A -8.632301
1 0.438869 5 A B B 6.690289
2 0.047160 9 A A A -8.632301
3 0.644263 0 A B B 6.690289
4 0.579040 0 A A A -8.632301
5 0.820389 6 B B B 6.690289
6 0.112341 5 A A A -8.632301
7 0.432502 0 B B B 6.690289
Just return the group from the function instead of the coefficient.
# return the group instead of a scalar value
def group_fitter(group):
    lr = LinearRegression()
    X = group["bid"].fillna(0).values.reshape(-1, 1)
    y = group["clicks"].fillna(0)
    lr.fit(X, y)
    group['coefficient'] = lr.coef_[0]  # <- This is the changed line
    return group
# the new column gets added to the data
s = groups.apply(group_fitter)
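If you then want just one coefficient per market rather than the whole augmented frame, a small follow-up works (a sketch; it assumes the coefficient column created above):
# collapse the augmented frame to one coefficient per market
coefs = s[['market', 'coefficient']].drop_duplicates().set_index('market')['coefficient']
print(coefs)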
Related
My spreadsheet (referred to below as sample.xlsx) has a stock column and a ChangePercent column; the answer below reconstructs it as a DataFrame. I've been having trouble getting the beta for each stock using the LinearRegression() function.
Input:
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
df = pd.read_excel('sample.xlsx')
mean = df['ChangePercent'].mean()
for index, row in df.iterrows():
    symbol = row['stock']
    perc = row['ChangePercent']
    x = np.array(perc).reshape((-1, 1))
    y = np.array(mean)
    model = LinearRegression().fit(x, y)
    print(model.coef_)
Output:
Line 16: model = LinearRegression().fit(x, y)
"Singleton array %r cannot be considered a valid collection." % x
TypeError: Singleton array array(3.34) cannot be considered a valid collection.
How can I make the collection valid so that I can get a beta value(model.coef_) for each stock?
X and y must have the same number of samples, so you need to reshape y as well, into 1 row and 1 column. In this case that comes down to the following:
np.array(mean).reshape(-1,1) or np.array(mean).reshape(1,1)
Given that you are training 5 models, each on just one sample, it is not surprising that all 5 will "learn" that the coefficient of the linear regression is 0 and the intercept is 3.34 (y).
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
df = pd.DataFrame({
    "stock": ["ABCD", "XYZ", "JK", "OPQ", "GHI"],
    "ChangePercent": [-1.7, 30, 3.7, -15.3, 0]
})
mean = df['ChangePercent'].mean()
for index, row in df.iterrows():
    symbol = row['stock']
    perc = row['ChangePercent']
    x = np.array(perc).reshape(-1, 1)
    y = np.array(mean).reshape(-1, 1)
    model = LinearRegression().fit(x, y)
    print(f"{model.intercept_} + {model.coef_}*{x} = {y}")
That is correct from an algorithmic point of view, but it makes no practical sense, given that each model is trained on a single example.
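If the goal really is a per-stock beta, the usual fix is to regress a series of the stock's returns against the market's returns, so each model sees many observations. A sketch under assumed data (the return columns here are invented purely for illustration):
import pandas as pd
from sklearn.linear_model import LinearRegression

# Hypothetical layout (an assumption, not from the question): one row per
# (stock, day), with the stock's daily return and the market's daily return.
returns = pd.DataFrame({
    "stock":      ["ABCD"] * 3 + ["XYZ"] * 3,
    "stock_ret":  [0.010, -0.020, 0.015, 0.030, -0.010, 0.020],
    "market_ret": [0.008, -0.015, 0.010, 0.008, -0.015, 0.010],
})

for symbol, grp in returns.groupby("stock"):
    # beta is the slope of stock returns regressed on market returns
    x = grp["market_ret"].to_numpy().reshape(-1, 1)
    y = grp["stock_ret"].to_numpy()
    beta = LinearRegression().fit(x, y).coef_[0]
    print(symbol, beta)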
I am trying to get RF feature importances. I fit the random forest on the data like this:
model = RandomForestRegressor()
n = model.fit(self.X_train, self.y_train)
if n is not None:
    df = pd.DataFrame(data=n, columns=["Feature", "Importance_Score"])
    df["Feature_Name"] = np.array(self.X_Headers)
    df = df.drop(["Feature"], axis=1)
    df[["Feature_Name", "Importance_Score"]].to_csv("RF_Importances.csv", index=False)
    del df
However, the n variable ends up as None. Why is this happening?
It is not clear how model.fit(self.X_train, self.y_train) could give you None: scikit-learn's fit returns the fitted estimator itself (self), so n should simply be the model. More information about how you set up the model would help.
If we set this up using simulated data, it works:
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor

np.random.seed(111)
X = pd.DataFrame(np.random.normal(0, 1, (100, 5)), columns=['A', 'B', 'C', 'D', 'E'])
y = np.random.normal(0, 1, 100)
model = RandomForestRegressor()
n = model.fit(X, y)
if n is not None:
    df = pd.DataFrame({'features': X.columns, 'importance': n.feature_importances_})
df
features importance
0 A 0.176091
1 B 0.183817
2 C 0.169927
3 D 0.267574
4 E 0.202591
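From there, persisting the importances the way the question intended takes one more line (a sketch; the file name just mirrors the question):
# sort descending and write out, mirroring the original goal
df.sort_values('importance', ascending=False).to_csv('RF_Importances.csv', index=False)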
If a dataframe has two or more columns with numerical and text values, plus one Label/Target column, and I want to apply a model like SVM, how can I use only the columns I am most interested in?
Ex.
Data Num Label/Target No_Sense
What happens here? group1 1 Migrate
Customer Management group2 0 Change Stage
Life Cycle Stages group1 1 Restructure
Drop-down allows to select status type group3 1 Restructure Status
and so on.
The approach I have taken is:
1. Encode the "Num" column:
one_hot = pd.get_dummies(df['Num'])
df = df.drop('Num',axis = 1)
df = df.join(one_hot)
2.encode "Data" column:
def bag_words(df):
    df = basic_preprocessing(df)
    count_vectorizer = CountVectorizer()
    count_vectorizer.fit(df['Data'])
    list_corpus = df["Data"].tolist()
    list_labels = df["Label/Target"].tolist()
    X = count_vectorizer.transform(list_corpus)
    return X, list_labels
Then apply bag_words to the dataset
X, y = bag_words(df)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=40)
Is there anything that I missed in these steps? How can I select only "Data" and "Num" features in my training dataset? (as I think "No_Sense" is not so relevant for my purposes)
EDIT: I have tried with
def bag_words(df):
    df = basic_preprocessing(df)
    count_vectorizer = CountVectorizer()
    count_vectorizer.fit(df['Data'])
    list_corpus = df["Data"].tolist() + df["group1"].tolist() + df["group2"].tolist() + df["group3"].tolist()  # <----
    list_labels = df["Label/Target"].tolist()
    X = count_vectorizer.transform(list_corpus)
    return X, list_labels
but I have found the error:
TypeError: 'int' object is not iterable
I hope this helps you:
import pandas as pd
import numpy as np
import re
from sklearn.feature_extraction.text import CountVectorizer
# this part is just so I can recreate your df from the string you posted
# remove this part !!!!
data = """
Data                                     Num     Label/Target  No_Sense
What happens here?                       group1  1             Migrate
Customer Management                      group2  0             Change Stage
Life Cycle Stages                        group1  1             Restructure
Drop-down allows to select status type   group3  1             Restructure Status
"""
lines = data.strip().split('\n')  # <- this definition was missing
df = pd.DataFrame(np.array([re.split(r'\s{2,}', line) for line in lines[1:]]),
                  columns=lines[0].split())

# what you want starts from here !!!!:
one_hot = pd.get_dummies(df['Num'])
df = df.drop('Num', axis=1)
df = df.join(one_hot)
# at this point you have 3 new features for the 'Num' variable

def bag_words(df):
    count_vectorizer = CountVectorizer()
    count_vectorizer.fit(df['Data'])
    matrix = count_vectorizer.transform(df['Data'])
    # this dataframe, `encoded_df`, has 15 new features: the result of fitting
    # the CountVectorizer to the 'Data' variable
    encoded_df = pd.DataFrame(data=matrix.toarray(),
                              columns=["Data" + str(i) for i in range(matrix.shape[1])])
    # adding them to the dataframe (note the assignment, which was missing)
    df = df.join(encoded_df)
    # getting the numpy arrays that you can use in training
    X = df.loc[:, ["Data" + str(i) for i in range(matrix.shape[1])] + ["group1", "group2", "group3"]].to_numpy()
    y = df.loc[:, ["Label/Target"]].to_numpy()
    return X, y

X, y = bag_words(df)
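With X and y built from only the "Data" and "Num" features, training the SVM the question set out to use is then a couple of lines. A minimal sketch, assuming scikit-learn's SVC (with this tiny 4-row example the score is meaningless; it only shows the wiring):
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=40)
clf = SVC()
clf.fit(X_train, y_train.ravel())  # ravel() because y comes back as a column vector
print(clf.score(X_test, y_test.ravel()))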
How do I get the regression intercept and coefficient data for unique IDs in a dataframe into a single dataframe where each row has the UID, its intercept, and its coefficients?
This is a snippet of what my raw data looks like. Future data can have more UIDs and more fields (independent variables).
UID  A1           A2           A3           A4           Rating
1    0.377489423  0.950311846  0.892135293  0.077054085  4
1    0.595570737  0.824334482  0.388634543  0.947936483  4
1    0.585703124  0.825486315  0.569809886  0.321117521  3
1    0.386968371  0.594556911  0.260187376  0.394238102  4
1    0.532731866  0.219741858  0.865710517  0.173044631  3
1    0.16565561   0.125096015  0.881841651  0.494690133  4
2    0.42418965   0.814894214  0.989426645  0.871014023  1
2    0.742604257  0.571780036  0.247811255  0.468820653  2
2    0.401989919  0.375134173  0.539599593  0.443260146  3
2    0.167910365  0.940073739  0.490081723  0.803074574  5
2    0.614160221  0.045817359  0.077645469  0.367456074  4
3    0.866397055  0.2932472    0.968410252  0.348542304  5
3    0.141680391  0.998446121  0.201506356  0.689863785  1
3    0.407182414  0.721650663  0.174277013  0.922810374  1
Here is the code I wrote to loop through each unique UID and run the linear model and add the intercept and coefficients for each UID to a list.
from sklearn.linear_model import LinearRegression

ids = df.UID.unique()
op = []
for i in ids:
    df_i = df[df.UID == i]
    X = df_i.drop(['UID', 'Rating'], axis=1)
    y = df_i['Rating']
    reg = LinearRegression().fit(X, y)
    reg.score(X, y)
    const = reg.intercept_
    coef = reg.coef_
    op.append(const)
    op.append(coef)
op
I would like my output to look like this format (the data shown is dummy data), so that each row has the UID, its intercept, and the linear regression coefficients. This is where I am stuck.
UID  Intercept  A1           A2           A3           A4
1    3.2343     0.950311846  0.892135293  0.077054085  4.3454
2    2.123      0.824334482  0.388634543  0.947936483  2.3454
3    3.455      0.825486315  0.569809886  0.321117521  3.12343
Feel free to comment on the initial approach to get the regression models as well.
Thank you
Here is what I came up with. I just need to add the UID; not sure how to add that for each row.
ids = df.UID.unique()
op = pd.DataFrame()
intercept = []
coefficients = []
UID = []
for i in ids:
    df_i = df[df.UID == i]
    X = df_i.drop(['UID', 'Rating'], axis=1)
    y = df_i['Rating']
    reg = LinearRegression().fit(X, y)
    reg.score(X, y)
    unique_id = df_i['UID'].unique()
    const = reg.intercept_
    coef = reg.coef_
    UID.append(unique_id)
    intercept.append(const)
    coefficients.append(coef)
intercep_new = pd.DataFrame(intercept)
coefficients_new = pd.DataFrame(coefficients)
UID_new = pd.DataFrame(UID)
colNames = df.drop(['Rating'], axis=1).columns
colNames = colNames.insert(1, 'Const')
colNames
op = pd.concat([UID_new, intercep_new, coefficients_new], axis=1)
op.columns = colNames
See changes below:
ids = df.UID.unique()
op = pd.DataFrame()
for i in ids:
    df_i = df[df.UID == i]
    X = df_i.drop(['UID', 'Rating'], axis=1)
    y = df_i['Rating']
    reg = LinearRegression().fit(X, y)
    reg.score(X, y)
    const = reg.intercept_
    coef = reg.coef_
    uid = i
    array = np.append(coef, const)
    array = np.append(array, uid)
    array = array.reshape(1, len(array))
    df_append = pd.DataFrame(array)
    op = op.append(df_append)
op.columns = ['A' + str(i) for i in range(1, len(op.columns) + 1)]
op.rename(columns={op.columns[-1]: "UID"}, inplace=True)
op.rename(columns={op.columns[-2]: "Intercept"}, inplace=True)
op = op.reset_index().drop('index', axis=1)
op = op.drop_duplicates()
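As a side note, the same output frame can be built with less array juggling by collecting one dict per UID and constructing the DataFrame at the end; a sketch under the same column layout as above:
import pandas as pd
from sklearn.linear_model import LinearRegression

rows = []
for i in df.UID.unique():
    df_i = df[df.UID == i]
    X = df_i.drop(['UID', 'Rating'], axis=1)
    y = df_i['Rating']
    reg = LinearRegression().fit(X, y)
    # one record per UID: the intercept plus one entry per feature column
    rows.append({'UID': i, 'Intercept': reg.intercept_,
                 **dict(zip(X.columns, reg.coef_))})
op = pd.DataFrame(rows)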
I'm working in PySpark, and I'd like to find a way to perform linear regressions on groups of data. Specifically given this dataframe
import pandas as pd
pdf = pd.DataFrame({'group_id': [1, 1, 1, 2, 2, 2, 3, 3, 3, 3],
                    'x': [0, 1, 2, 0, 1, 5, 2, 3, 4, 5],
                    'y': [2, 1, 0, 0, 0.5, 2.5, 3, 4, 5, 6]})
df = sqlContext.createDataFrame(pdf)
df.show()
# +--------+-+---+
# |group_id|x| y|
# +--------+-+---+
# | 1|0|2.0|
# | 1|1|1.0|
# | 1|2|0.0|
# | 2|0|0.0|
# | 2|1|0.5|
# | 2|5|2.5|
# | 3|2|3.0|
# | 3|3|4.0|
# | 3|4|5.0|
# | 3|5|6.0|
# +--------+-+---+
I'd now like to be able to fit a separate y ~ ax + b model for each group_id and output a new dataframe with columns a and b and a row for each group.
For instance for group 1 I could do:
from sklearn import linear_model
# Regression on group_id = 1
data = df.where(df.group_id == 1).toPandas()
regr = linear_model.LinearRegression()
regr.fit(data.x.values.reshape(len(data), 1), data.y.values.reshape(len(data), 1))
a = regr.coef_[0][0]
b = regr.intercept_[0]
print('For group 1, y = {0}*x + {1}'.format(a, b))
# Repeat for group_id=2, group_id=3
But doing this for each group involves bringing the data back to the driver one by one, which doesn't take advantage of any Spark parallelism.
Here's a solution I found. Instead of performing separate regressions on each group of data, create one sparse matrix with separate columns for each group:
from pyspark.mllib.regression import LabeledPoint, SparseVector
# Label points for regression
def groupid_to_feature(group_id, x, num_groups):
    intercept_id = num_groups + group_id - 1
    # Need a vector containing x and a '1' for the intercept term
    return SparseVector(num_groups * 2, {group_id - 1: x, intercept_id: 1.0})

labelled = df.map(lambda line: LabeledPoint(line[2],
                                            groupid_to_feature(line[0], line[1], 3)))
labelled.take(5)
# [LabeledPoint(2.0, (6,[0,3],[0.0,1.0])),
# LabeledPoint(1.0, (6,[0,3],[1.0,1.0])),
# LabeledPoint(0.0, (6,[0,3],[2.0,1.0])),
# LabeledPoint(0.0, (6,[1,4],[0.0,1.0])),
# LabeledPoint(0.5, (6,[1,4],[1.0,1.0]))]
Then use Spark's LinearRegressionWithSGD to run the regression:
from pyspark.mllib.regression import LinearRegressionModel, LinearRegressionWithSGD
lrm = LinearRegressionWithSGD.train(labelled, iterations=5000, intercept=False)
The weights from this regression contain the coefficient and intercept for each group_id, i.e.
lrm.weights
# DenseVector([-1.0, 0.5, 1.0014, 2.0, 0.0, 0.9946])
or reshaped into a DataFrame to give a and b for each group:
pd.DataFrame(lrm.weights.reshape(2,3).transpose(), columns=['a','b'], index=[1,2,3])
# a b
# 1 -0.999990 1.999986e+00
# 2 0.500000 5.270592e-11
# 3 1.001398 9.946426e-01
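On Spark 2.3+ (and especially 3.x) there is also a more direct route: a grouped pandas UDF runs an ordinary scikit-learn fit per group on the executors, so the separate regressions stay parallel without the sparse-matrix trick. A sketch, assuming pandas and scikit-learn are available on the workers:
import pandas as pd
from sklearn.linear_model import LinearRegression

def fit_group(pdf):
    # pdf is the pandas DataFrame holding all rows of a single group_id
    reg = LinearRegression().fit(pdf[['x']], pdf['y'])
    return pd.DataFrame({'group_id': [pdf['group_id'].iloc[0]],
                         'a': [reg.coef_[0]],
                         'b': [reg.intercept_]})

result = df.groupBy('group_id').applyInPandas(
    fit_group, schema='group_id long, a double, b double')
result.show()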