Grouped linear regression in Spark - python

I'm working in PySpark, and I'd like to find a way to perform linear regressions on groups of data. Specifically, given this dataframe:
import pandas as pd
pdf = pd.DataFrame({'group_id': [1, 1, 1, 2, 2, 2, 3, 3, 3, 3],
                    'x': [0, 1, 2, 0, 1, 5, 2, 3, 4, 5],
                    'y': [2, 1, 0, 0, 0.5, 2.5, 3, 4, 5, 6]})
df = sqlContext.createDataFrame(pdf)
df.show()
# +--------+-+---+
# |group_id|x|  y|
# +--------+-+---+
# |       1|0|2.0|
# |       1|1|1.0|
# |       1|2|0.0|
# |       2|0|0.0|
# |       2|1|0.5|
# |       2|5|2.5|
# |       3|2|3.0|
# |       3|3|4.0|
# |       3|4|5.0|
# |       3|5|6.0|
# +--------+-+---+
I'd now like to be able to fit a separate y ~ ax + b model for each group_id and output a new dataframe with columns a and b and a row for each group.
For instance for group 1 I could do:
from sklearn import linear_model

# Regression on group_id = 1
data = df.where(df.group_id == 1).toPandas()
regr = linear_model.LinearRegression()
regr.fit(data.x.values.reshape(len(data), 1),
         data.y.values.reshape(len(data), 1))
a = regr.coef_[0][0]
b = regr.intercept_[0]
print('For group 1, y = {0}*x + {1}'.format(a, b))
# Repeat for group_id = 2, group_id = 3
But doing this for each group means bringing the data back to the driver one group at a time, which doesn't take advantage of any Spark parallelism.

Here's a solution I found. Instead of performing a separate regression on each group of data, create one sparse feature matrix with separate columns for each group:
from pyspark.mllib.regression import LabeledPoint, SparseVector

# Label points for regression
def groupid_to_feature(group_id, x, num_groups):
    intercept_id = num_groups + group_id - 1
    # Need a vector containing x and a '1' for the intercept term
    return SparseVector(num_groups * 2, {group_id - 1: x, intercept_id: 1.0})
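For instance (just to illustrate the encoding), with three groups a point from group 2 with x = 5 puts x into slot 1 and the intercept flag into slot 4:
groupid_to_feature(2, 5.0, 3)
# SparseVector(6, {1: 5.0, 4: 1.0})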
# Note: DataFrame.map works on the Spark 1.x sqlContext API; on Spark 2.x+ use df.rdd.map
labelled = df.map(lambda line: LabeledPoint(line[2],
                                            groupid_to_feature(line[0], line[1], 3)))
labelled.take(5)
# [LabeledPoint(2.0, (6,[0,3],[0.0,1.0])),
# LabeledPoint(1.0, (6,[0,3],[1.0,1.0])),
# LabeledPoint(0.0, (6,[0,3],[2.0,1.0])),
# LabeledPoint(0.0, (6,[1,4],[0.0,1.0])),
# LabeledPoint(0.5, (6,[1,4],[1.0,1.0]))]
Then use Spark's LinearRegressionWithSGD to run the regression:
from pyspark.mllib.regression import LinearRegressionWithSGD

# intercept=False because each group's intercept is already encoded as a feature
lrm = LinearRegressionWithSGD.train(labelled, iterations=5000, intercept=False)
The weights from this regression contain the coefficient and intercept for each group_id, i.e.
lrm.weights
# DenseVector([-1.0, 0.5, 1.0014, 2.0, 0.0, 0.9946])
or reshaped into a DataFrame to give a and b for each group:
pd.DataFrame(lrm.weights.reshape(2, 3).transpose(),
             columns=['a', 'b'], index=[1, 2, 3])
#           a             b
# 1 -0.999990  1.999986e+00
# 2  0.500000  5.270592e-11
# 3  1.001398  9.946426e-01
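As a sanity check (a minimal sketch using numpy on the driver, which is fine here because the toy data is tiny), the closed-form per-group fits agree with the SGD weights above:
import numpy as np

for gid, grp in pdf.groupby('group_id'):
    a, b = np.polyfit(grp.x, grp.y, deg=1)  # exact least-squares slope and intercept
    print('group {0}: y = {1:.3f}*x + {2:.3f}'.format(gid, a, b))
# group 1: y = -1.000*x + 2.000
# group 2: y = 0.500*x + 0.000
# group 3: y = 1.000*x + 1.000
# (up to floating-point noise)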

Related

return value to df after several operations

I run IPR outlier control on a relatively big DataFrame df (over 1,000,000 rows). I perform IPR within subsets of the data, so I use a for loop. How can I write the result back to the original df?
   months product  brick  units  is_outlier
0  202104     abc      3   1.00       False
1  202104     abc      6   3.00       False
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler

for product in df['product'].unique():
    for brick in df['brick'].unique():
        try:
            # Extract the units for the current product and brick
            data = df.loc[(df['product'] == product) & (df['brick'] == brick)]['units'].values
            # Scale the data
            scaler = StandardScaler()
            data_scaled = scaler.fit_transform(data.reshape(-1, 1))
            # Fit a linear regression model to the data
            reg = LinearRegression()
            reg.fit(np.arange(len(data_scaled)).reshape(-1, 1), data_scaled)
            # Calculate the residuals of the regression
            residuals = data_scaled - reg.predict(np.arange(len(data_scaled)).reshape(-1, 1))
            # Identify any observations with a residual larger than 2 standard deviations from the mean
            threshold = 2 * residuals.std()
            outliers = np.where(np.abs(residuals) > threshold)
            # Set the "is_outlier" column to True for the outliers in the current product
            df.loc[(df['product'] == product) & (df['brick'] == brick) & (df.index.isin(outliers[0])), 'is_outlier'] = True
        except:
            pass
As @QuangHoang suggested, use groupby and apply your custom function:
def outlier(df):
    data = df.to_numpy().reshape((-1, 1))
    # Scale the data
    scaler = StandardScaler()
    data_scaled = scaler.fit_transform(data)
    # Fit a linear regression model to the data
    reg = LinearRegression()
    reg.fit(np.arange(len(data_scaled)).reshape(-1, 1), data_scaled)
    # Calculate the residuals of the regression
    residuals = data_scaled - reg.predict(np.arange(len(data_scaled)).reshape(-1, 1))
    # Identify any observations with a residual
    # larger than 2 standard deviations from the mean
    threshold = 2 * residuals.std()
    return np.ravel(np.abs(residuals) > threshold)

df['is_outlier'] = df.groupby(['product', 'brick'])['units'].transform(outlier)
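Note that transform works here because outlier returns a flat boolean array with exactly one entry per row of the group (that is what the np.ravel is for), so pandas can align the flags back to the original rows. A quick sanity check on the result:
# Count how many rows were flagged
print(df['is_outlier'].value_counts())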

Custom F-test in linearmodels (Python)

I have fit a linearmodels.PanelOLS model and stored it in m. I now want to test if certain coefficients are simultaneously equal to zero.
Does a fitted linearmodels.PanelOLS object have an F-test function where I can pass my own restriction matrix?
I am looking for something like statsmodels' f_test method.
Here's a minimal reproducible example.
# Libraries
from linearmodels.panel import PanelOLS
from linearmodels.datasets import wage_panel
# Load data and set index
df = wage_panel.load()
df = df.set_index(['nr','year'])
# Add constant term
df['const'] = 1
# Fit model
m = PanelOLS(dependent=df['lwage'], exog=df[['const','expersq','married']])
m = m.fit(cov_type='clustered', cluster_entity=True)
# Is there an f_test method for m???
m.f_test(r_mat=some_matrix_here) # Something along these lines?
You can use wald_test (a standard F-test is numerically identical to a Wald test under some assumptions on the covariance).
# Libraries
from linearmodels.panel import PanelOLS
from linearmodels.datasets import wage_panel
# Load data and set index
df = wage_panel.load()
df = df.set_index(['nr','year'])
# Add constant term
df['const'] = 1
# Fit model
m = PanelOLS(dependent=df['lwage'], exog=df[['const','expersq','married']])
m = m.fit(cov_type='clustered', cluster_entity=True)
Then the test
import numpy as np

# Use matrix notation Rb - q = 0, where R is `restr` and q is `value`
# Restrictions: expersq = 0.001 and expersq + married = 0.2
restr = np.array([[0, 1, 0],
                  [0, 1, 1]])
value = np.array([0.001, 0.2])
m.wald_test(restr, value)
This returns
Linear Equality Hypothesis Test
H0: Linear equality constraint is valid
Statistic: 0.2608
P-value: 0.8778
Distributed: chi2(2)
WaldTestStatistic, id: 0x2271cc6fdf0
You can also use formula syntax if you used formulas to define your model, which can be easier to code up.
fm = PanelOLS.from_formula("lwage~ 1 + expersq + married", data=df)
fm = fm.fit(cov_type='clustered', cluster_entity=True)
fm.wald_test(formula="expersq = 0.001, expersq + married = 0.2")
The result is the same as above.
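Back to the original question of testing whether certain coefficients are simultaneously zero: the same mechanism applies with the restriction values set to zero. A minimal sketch, reusing m and fm from above:
import numpy as np

# H0: expersq = 0 and married = 0 jointly
restr = np.array([[0, 1, 0],
                  [0, 0, 1]])
m.wald_test(restr, np.zeros(2))

# Equivalent formula syntax
fm.wald_test(formula="expersq = 0, married = 0")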

How to perform time series analysis that contains multiple groups in Python using fbProphet or other models?

All,
My dataset looks like the following. I am trying to predict 'amount' for the next 6 months using fbProphet or another model, but I would like the prediction to be per group, i.e. A, B, C, D. I am not sure how to do that in Python with fbProphet or another model. I checked fbprophet's official page, and the only information I found is that Prophet takes just two columns, one "Date" and the other "amount".
I am new to Python, so any help with code explanation is greatly appreciated!
import pandas as pd

data = {'Date': ['2017-01-01', '2017-02-01', '2017-03-01', '2017-04-01',
                 '2017-05-01', '2017-06-01', '2017-07-01'],
        'Group': ['A', 'B', 'C', 'D', 'C', 'A', 'B'],
        'Amount': ['12.1', '13', '15', '10', '12', '9.0', '5.6']}
df = pd.DataFrame(data)
print(df)
output:
         Date Group Amount
0  2017-01-01     A   12.1
1  2017-02-01     B     13
2  2017-03-01     C     15
3  2017-04-01     D     10
4  2017-05-01     C     12
5  2017-06-01     A    9.0
6  2017-07-01     B    5.6
fbprophet requires two columns ds and y, so you need to first rename the two columns
df = df.rename(columns={'Date': 'ds', 'Amount':'y'})
Assuming that your groups are independent of each other and you want one prediction per group, you can group the dataframe by the "Group" column and run a forecast for each group:
from fbprophet import Prophet

grouped = df.groupby('Group')
for g in grouped.groups:
    group = grouped.get_group(g)
    m = Prophet()
    m.fit(group)
    future = m.make_future_dataframe(periods=365)
    forecast = m.predict(future)
    print(forecast.tail())
Take note that the input dataframe you supply in the question is not sufficient for the model, because group D has only a single data point: fbprophet's forecast needs at least 2 non-NaN rows per fit.
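One way to guard against that before looping (a small sketch using the question's 'Group' column) is to drop the undersized groups:
# Keep only groups with at least 2 rows, the minimum Prophet can fit
df = df.groupby('Group').filter(lambda g: len(g) >= 2)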
EDIT: if you want to merge all predictions into one dataframe, the idea is to name the yhat for each group differently, do pd.merge() in the loop, and then cherry-pick the columns you need at the end:
final = pd.DataFrame()
for g in grouped.groups:
    group = grouped.get_group(g)
    m = Prophet()
    m.fit(group)
    future = m.make_future_dataframe(periods=365)
    forecast = m.predict(future)
    forecast = forecast.rename(columns={'yhat': 'yhat_' + g})
    final = pd.merge(final, forecast.set_index('ds'), how='outer',
                     left_index=True, right_index=True)
final = final[['yhat_' + g for g in grouped.groups.keys()]]
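One detail worth noting (an observation based on the sample data, which is monthly): make_future_dataframe defaults to daily frequency, so periods=365 produces a year of daily steps. For the six-month horizon the question asks about, a monthly frequency is probably closer to the intent:
future = m.make_future_dataframe(periods=6, freq='MS')  # six month-start steps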
import pandas as pd
import numpy as np
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.stattools import adfuller
from matplotlib import pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error

# Before doing any modeling using ARIMA, SARIMA etc., confirm that
# your time series is stationary by using the Augmented Dickey-Fuller test
# or other tests.

# Create a list of all groups, or get it from the data using np.unique or other methods
groups_iter = ['A', 'B', 'C', 'D']

dict_org = {}
dict_pred = {}
group_accuracy = {}

# Iterate over all groups and get the data
# from the DataFrame by filtering for the specific group
for i in range(len(groups_iter)):
    # 'Amount' is stored as strings in the sample data, hence astype(float)
    X = df[df['Group'] == groups_iter[i]]['Amount'].astype(float).values
    size = int(len(X) * 0.70)
    train, test = X[0:size], X[size:len(X)]
    history = [x for x in train]
    predictions = []
    # Using the ARIMA model; here you can also do a grid search for the best parameters
    for t in range(len(test)):
        model = ARIMA(history, order=(5, 1, 0))
        model_fit = model.fit(disp=0)
        output = model_fit.forecast()
        yhat = output[0]
        predictions.append(yhat)
        obs = test[t]
        history.append(obs)
        print("Predicted: %f, expected: %f" % (yhat, obs))
    error = mean_squared_log_error(test, predictions)
    dict_org.update({groups_iter[i]: test})
    dict_pred.update({groups_iter[i]: predictions})
    print("Group:", groups_iter[i], "Test MSE: %f" % error)
    group_accuracy.update({groups_iter[i]: error})
    plt.plot(test)
    plt.plot(predictions, color='red')
    plt.show()
I know this is old, but I was trying to predict outcomes for different clients, and when I tried Aditya Santoso's solution above I ran into some errors, so I added a couple of modifications and finally this worked for me:
from fbprophet import Prophet
import pandas as pd

df = pd.read_csv('file.csv')
df = df.rename(columns={'date': 'ds', 'amount': 'y', 'client_id': 'client_id'})
# I had to filter out clients with fewer than 3 records first to avoid errors,
# as prophet only works with 2+ records per group
df = df.groupby('client_id').filter(lambda x: len(x) > 2)

df.client_id = df.client_id.astype(str)

final = pd.DataFrame(columns=['client', 'ds', 'yhat'])

grouped = df.groupby('client_id')
for g in grouped.groups:
    group = grouped.get_group(g)
    m = Prophet()
    m.fit(group)
    future = m.make_future_dataframe(periods=365)
    forecast = m.predict(future)
    # I added a column with the client id
    forecast['client'] = g
    # I used concat instead of merge
    final = pd.concat([final, forecast], ignore_index=True)

final.head(10)

Python pandas create new column with groupby with custom agg function

My DataFrame:
from random import random, randint
from pandas import DataFrame

t = DataFrame({"metasearch": ["A", "B", "A", "B", "A", "B", "A", "B"],
               "market": ["A", "B", "A", "B", "A", "B", "A", "B"],
               "bid": [random() for i in range(8)],
               "clicks": [randint(0, 10) for i in range(8)],
               "country_code": ["A", "A", "A", "A", "A", "B", "A", "B"]})
I want to fit LinearRegression for each market, so I:
1) Group df - groups = t.groupby(by="market")
2) Prepare function to fit model on a group -
from sklearn.linear_model import LinearRegression

def group_fitter(group):
    lr = LinearRegression()
    X = group["bid"].fillna(0).values.reshape(-1, 1)
    y = group["clicks"].fillna(0)
    lr.fit(X, y)
    return lr.coef_[0]  # THIS IS A SCALAR
3) Create a new Series with market as an index and coef as a value:
s = groups.transform(group_fitter)
But the 3rd step fails: KeyError: ('bid_cpc', 'occurred at index bid')
I think you need apply instead of transform, because the function works with multiple columns together; then use join to add the new column:
from sklearn.linear_model import LinearRegression

def group_fitter(group):
    lr = LinearRegression()
    X = group["bid"].fillna(0).values.reshape(-1, 1)
    y = group["clicks"].fillna(0)
    lr.fit(X, y)
    return lr.coef_[0]  # THIS IS A SCALAR

groups = t.groupby(by="market")
df = t.join(groups.apply(group_fitter).rename('new'), on='market')
print(df)
        bid  clicks country_code market metasearch       new
0  0.462734       9            A      A          A -8.632301
1  0.438869       5            A      B          B  6.690289
2  0.047160       9            A      A          A -8.632301
3  0.644263       0            A      B          B  6.690289
4  0.579040       0            A      A          A -8.632301
5  0.820389       6            B      B          B  6.690289
6  0.112341       5            A      A          A -8.632301
7  0.432502       0            B      B          B  6.690289
Just return the group from the function instead of the coefficient.
# Return the group instead of a scalar value
def group_fitter(group):
    lr = LinearRegression()
    X = group["bid"].fillna(0).values.reshape(-1, 1)
    y = group["clicks"].fillna(0)
    lr.fit(X, y)
    group['coefficient'] = lr.coef_[0]  # <- This is the changed line
    return group

# The new column gets added to the data
s = groups.apply(group_fitter)
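The practical difference between the two answers: the join version broadcasts one coefficient per market onto the original frame, while this apply version returns each group with the extra column already attached, so s keeps the original shape. A quick way to see the per-market coefficients (same toy frame t as above):
print(s[['market', 'coefficient']].drop_duplicates())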

How can I better format the output that I'm attempting to save from several regressions?

I'd like to loop through several specifications of a linear regression and save the results for each model in a Python dictionary. The code below is somewhat successful, but additional text (e.g. datatype information) is included in the dictionary, making it unreadable. Moreover, regarding the confidence interval, I'd like to have two separate columns, one for the upper and another for the lower bound, but I'm unable to do that.
code:
import patsy
import pandas as pd
import statsmodels.api as sm
from collections import defaultdict

colleges = ['ARC_g', u'CCSF_g', u'DAC_g', u'DVC_g', u'LC_g', u'NVC_g', u'SAC_g',
            u'SRJC_g', u'SC_g', u'SCC_g']
results = defaultdict(lambda: defaultdict(int))
for exog in colleges:
    exog = exog.encode('ascii')
    f1 = 'GRADE_PT_103 ~ %s -1' % exog
    y, X = patsy.dmatrices(f1, data, return_type='dataframe')
    mod = sm.OLS(y, X)  # Describe model
    res = mod.fit()     # Fit model
    results[exog]['beta'] = res.params
    # I'd like the confidence interval to be separated into two columns ('upper' and 'lower')
    results[exog]['CI'] = res.conf_int()
    results[exog]['rsq'] = res.rsquared
pd.DataFrame(results)
Current output:
      ARC_g                          CCSF_g                            ...
beta  ARC_g 0.79304 dtype: float64   CCSF_g 0.833644 dtype: float64
CI    0 1 ARC_g 0.557422 1.0...      0 1 CCSF_g 0.655746 1...
rsq   0.122551                       0.213053
This is how I'd summarize what you were showing. Hopefully it helps give you some ideas.
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

data = pd.DataFrame(np.random.randn(30, 5), columns=list('YABCD'))

results = {}
for c in data.columns[1:]:
    f = 'Y ~ {}'.format(c)
    r = smf.ols(formula=f, data=data).fit()
    coef = pd.concat([r.params,
                      r.conf_int().iloc[:, 0],
                      r.conf_int().iloc[:, 1]],
                     axis=1, keys=['coef', 'lower', 'upper'])
    coef.index = ['Intercept', 'Beta']
    results[c] = dict(coef=coef, rsq=r.rsquared)

keys = data.columns[1:]
summary = pd.concat([results[k]['coef'].stack() for k in keys], axis=1, keys=keys)
summary.index = summary.index.to_series().str.join(' - ')
summary.append(pd.Series([results[k]['rsq'] for k in keys], keys, name='R Squared'))
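A caveat for newer environments: DataFrame.append was removed in pandas 2.0, so on a recent install the last line can be written as an equivalent pd.concat (same result, just a different spelling):
rsq = pd.Series([results[k]['rsq'] for k in keys], index=keys, name='R Squared')
summary = pd.concat([summary, rsq.to_frame().T])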
