'DataFrame' object is not callable in PySpark - Python

temp = Window.partitionBy("id").orderBy("time").rowsBetween(-5, 5)
spark_df.withColumn("movingAvg",fn.avgspark_df("average")).over(temp)).show()
I'm getting this error on the last line:
'DataFrame' object is not callable

You are missing a bracket, but it also seems some of the syntax is wrong. I assume this is what your code was before the bracket went missing:
fn.avg(spark_df("average"))
This is why you get the error: you are trying to call the DataFrame as a function. I believe you can achieve what you want with:
import pandas as pd
import pyspark.sql.functions as fn
from pyspark.sql import Window

df = pd.DataFrame({'id': [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                   'time': [1, 2, 3, 4, 5, 1, 2, 3, 4, 5],
                   'average': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]})
df = sqlContext.createDataFrame(df)

temp = Window.partitionBy("id").orderBy("time").rowsBetween(-1, 1)
df.withColumn("movingAvg", fn.avg("average").over(temp)).show()

from pyspark.sql import SparkSession
from pyspark.sql import Window
from pyspark.sql.functions import max, min, avg

spark = SparkSession.builder.appName("Data Frame Example") \
    .config("spark.some.config.option", "some-value") \
    .getOrCreate()

l = [("Alice", "2016-05-01", 50.00),
     ("Alice", "2016-05-03", 45.00),
     ("Alice", "2016-05-04", 55.00),
     ("Bob", "2016-05-01", 25.00),
     ("Bob", "2016-05-04", 29.00),
     ("Bob", "2016-05-06", 27.00)]

customers = spark.sparkContext.parallelize(l).toDF(["name", "date", "amountSpent"])

temp = Window.partitionBy("name").orderBy("date")
customers.withColumn("movingAvg", avg("amountSpent").over(temp)).show()
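Note that with an orderBy but no explicit frame, the window runs from the start of the partition up to the current row, so movingAvg above is really a running average per name. A minimal sketch of a bounded moving average over the previous, current, and next row, reusing the customers DataFrame:
# Bound the frame to the previous, current, and next row within each name.
temp = Window.partitionBy("name").orderBy("date").rowsBetween(-1, 1)
customers.withColumn("movingAvg", avg("amountSpent").over(temp)).show()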

Related

Check if Input dataset contains a key or not in PySpark

I have the code shown below. I need to check whether the column y.lc.eoouh.ci is present in the input source and populate the column only if it is present; otherwise it should be NULL. (The key lc is also optional.)
The code below doesn't seem to work as intended: even though y.lc.eoouch.ci is present in the input, it evaluates to NULL.
The has_column implementation is from here.
df = df_s_a \
    .withColumn(
        "ceci",
        udf(
            lambda y: y.lc[-1].eoouh.ci if has_column(y, 'lc.eoouh.ci') else None,
            StringType()
        )(col('eh'))
    ) \
    .select(col('ceci'))
df.show()
Sample input:
{
  "eh": {
    "lc": [
      {
        "eoouch": {
          "ci": "1234ABC"
        }
      }
    ]
  }
}
The df[something.path.somewhere] doesn't work. I'll have to investigate that option a bit.
I've managed to make it work like this:
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from pyspark.sql.types import StringType


def has_column(df):
    try:
        df["lc"][0]["eoouch"]["ci"]
        return True
    except KeyError:
        return False


if __name__ == "__main__":
    spark = SparkSession.builder.getOrCreate()
    sc = spark.sparkContext

    data = [
        {"eh": {"lc": [{"eoouch": {"ci": "test"}}]}},
        {"eh": {"lc": [{"eoouch": {"as": "test"}}]}},
    ]
    df = spark.createDataFrame(data)

    add_column_udf = F.udf(
        lambda y: y if has_column(y) else None,
        StringType(),
    )
    df = df.withColumn("ceci", add_column_udf(F.col("eh")))
    df.show(truncate=False)  # produces the result below
Result:
+----------------------------------+-------------------------+
|eh |ceci |
+----------------------------------+-------------------------+
|{lc -> [{eoouch -> {ci -> test}}]}|{lc=[{eoouch={ci=test}}]}|
|{lc -> [{eoouch -> {as -> test}}]}|null |
+----------------------------------+-------------------------+
It's not perfect, since it isn't a general solution for arbitrary column names, but it could easily be generalized because it works on a plain dict-like object.
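A sketch of a slightly more general helper, assuming the nested value is reachable by plain indexing (dicts/Rows and lists); the dotted-path format and the name has_nested are illustrative, not part of the original answer:
def has_nested(obj, path):
    # Return True if e.g. obj["lc"][0]["eoouch"]["ci"] resolves for the path "lc.0.eoouch.ci".
    try:
        for part in path.split("."):
            obj = obj[int(part)] if part.isdigit() else obj[part]
        return True
    except (KeyError, IndexError, TypeError, ValueError):
        return False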

Unable to finish a Spark job on a Spark standalone cluster

I'm very new to Spark; I've been using it for just a week. This is my PySpark code, running on a standalone Spark cluster with a single master and two slaves. I'm trying to run a job that reads a dataset of roughly 0.1 million records, performs some manipulation on the data, and then dumps the DataFrame into an Oracle table. I'm having trouble completing the job. It seems the program has created 404 partitions for the tasks; on the console I can see 403/404 tasks completed, but the last task never seems to finish, so the job cannot complete. Can anyone tell me what the issue is with my code, or help with optimizing performance on Spark, or point me to a guide? Any tutorial or guide would help. Thanks in advance.
# creating a spark session
spark = SparkSession \
    .builder \
    .appName("pyspark_testing_29012020") \
    .config("spark.some.config.option", "some-value") \
    .getOrCreate()

# target table schema and column order
df_target = spark.read.csv("mycsv path", header=True)
df_local_schema = df_target.schema
df_column_order = df_target.columns

# dataframe with input file/dataset values and schema
df_source = spark.read \
    .format("csv") \
    .option("header", "false") \
    .option("inferSchema", "true") \
    .option("delimiter", ",") \
    .schema(df_local_schema) \
    .load("csv path")

# dataframe with the target file/dataset values
df_target = spark.read \
    .format("jdbc") \
    .option("url", "jdbc:oracle:thin:system/oracle123#127.0.0.1:0101:orcl") \
    .option("dbtable", "mydata") \
    .option("user", "system") \
    .option("password", "oracle123") \
    .option("driver", "oracle.jdbc.driver.OracleDriver") \
    .load()

# splitting the target table into upper and lower sections
df_target_upper = df_target.where(df_target['Key'] < 5)  # set A
df_source_upper = df_source.where(df_source['Key'] < 5)  # set B
df_source_lower = df_source.where(df_source['Key'] > 4)  # set D
df_target_lower = df_target.where(df_target['key'] > 4)  # set C

''' now programming for the upper segment of the data '''
# set operation A-B
A_minus_B = df_target_upper.join(df_source_upper,
                                 on=['key1', 'key2', 'key3', 'key4'],
                                 how='left_anti')
A_minus_B = A_minus_B.select(sorted(df_column_order))

# set operation B-A
B_minus_A = df_source_upper.join(df_target_upper,
                                 on=['key1', 'key2', 'key3', 'key4'],
                                 how='left_anti')
B_minus_A = B_minus_A.select(sorted(df_column_order))

# union of A-B and B-A
AmB_union_BmA = A_minus_B.union(B_minus_A)
AmB_union_BmA = AmB_union_BmA.select(sorted(df_column_order))

# A-B left anti B-A to get the uncommon records in both the dataframes
new_df = A_minus_B.join(B_minus_A, on=['key'], how='left_anti')
new_df = new_df.select(sorted(df_column_order))
AmB_union_BmA = AmB_union_BmA.select(sorted(df_column_order))

AnB = df_target_upper.join(df_source_upper,
                           on=['key1', 'key2', 'key3', 'key4'],
                           how='inner')
df_AnB_without_dupes = dropDupeDfCols(AnB)
new_AnB = df_AnB_without_dupes.select(sorted(df_column_order))

final_df = AmB_union_BmA.union(new_AnB)
final_df.show()

result_df = B_minus_A.union(new_df)
df_result_upper_seg = result_df.union(new_AnB)

''' now programming for the lower segment of the data '''
# set operation C-D
C_minus_D = df_target_lower.join(df_source_lower, on=['key'], how='left_anti')
C_minus_D = C_minus_D.select(sorted(df_column_order))

# set operation D-C
D_minus_C = df_source_lower.join(df_target_lower, on=['key'], how='left_anti')
D_minus_C = D_minus_C.select(sorted(df_column_order))

# union of C-D and D-C
CmD_union_DmC = C_minus_D.union(D_minus_C)
CmD_union_DmC = CmD_union_DmC.select(sorted(df_column_order))

# C-D left anti D-C to get the uncommon records in both the dataframes
lower_new_df = C_minus_D.join(D_minus_C, on=['key'], how='left_anti')
lower_new_df = lower_new_df.select(sorted(df_column_order))
CmD_union_DmC = CmD_union_DmC.select(sorted(df_column_order))

CnD = df_target_lower.join(df_source_lower,
                           on=['key'], how='inner')
new_CnD = dropDupeDfCols(CnD)
new_CnD = new_CnD.select(sorted(df_column_order))

lower_final_df = CmD_union_DmC.union(new_CnD)
result_df_lower = D_minus_C.union(lower_new_df)
df_result_lower_seg = result_df_lower.union(new_CnD)

df_final_result.write \
    .format("jdbc") \
    .mode("overwrite") \
    .option("url", "jdbc:oracle:thin:system/oracle123#127.0.0.1:1010:orcl") \
    .option("dbtable", "mydata") \
    .option("user", "system") \
    .option("password", "oracle123") \
    .option("driver", "oracle.jdbc.driver.OracleDriver") \
    .save()
Take a look at the Spark UI and the Spark monitoring guide.
Try to split your job into steps, then find the step that is failing.
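When 403 of 404 tasks finish quickly and the last one hangs, the usual suspect is data skew on the join keys (one partition receiving most of the rows) or the JDBC write itself. A rough sketch of two things worth trying; the partition count of 200 and the batchsize value are illustrative, not taken from the question:
# Assumption: the straggler comes from skewed join keys and/or the JDBC write.
# Spread the join work more evenly by repartitioning on the join keys first:
df_source = df_source.repartition(200, "key1", "key2", "key3", "key4")
df_target = df_target.repartition(200, "key1", "key2", "key3", "key4")

# For the final JDBC write, batching inserts usually helps:
df_final_result.write \
    .format("jdbc") \
    .mode("overwrite") \
    .option("batchsize", 10000) \
    .option("url", "jdbc:oracle:thin:system/oracle123#127.0.0.1:1010:orcl") \
    .option("dbtable", "mydata") \
    .option("user", "system") \
    .option("password", "oracle123") \
    .option("driver", "oracle.jdbc.driver.OracleDriver") \
    .save()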

Windowing and aggregating pyspark DataFrame [duplicate]

This question already has an answer here:
Spark Structured Streaming using sockets, set SCHEMA, Display DATAFRAME in console
(1 answer)
Closed 5 years ago.
I'm trying to process incoming events from a socket, then windowing and aggregating the event data. I've hit a snag with the windowing. It appears that even though I specify a schema for the DataFrame, it does not translate into columns.
import sys

from pyspark.sql.types import StructType, StringType, TimestampType, FloatType, IntegerType, StructField
from pyspark.sql import SparkSession
import pyspark.sql.functions as F

if __name__ == "__main__":
    # our data currently looks like this (tab separated).
    # -SYMBOL  DATE                 PRICE    TICKVOL  BID      ASK
    # NQU7     2017-05-28T15:00:00  5800.50  12       5800.50  5800.50
    # NQU7     2017-05-28T15:00:00  5800.50  1        5800.50  5800.50
    # NQU7     2017-05-28T15:00:00  5800.50  5        5800.50  5800.50
    # NQU7     2017-05-28T15:00:00  5800.50  1        5800.50  5800.50
    if len(sys.argv) != 3:
        # print("Usage: network_wordcount.py <hostname> <port>", file=sys.stderr)
        exit(-1)

    spark = SparkSession \
        .builder \
        .appName("StructuredTickStream") \
        .getOrCreate()
    sc = spark.sparkContext
    sc.setLogLevel('WARN')

    # Read all the csv files written atomically in a directory
    tickSchema = StructType([
        StructField("symbol", StringType(), True),
        StructField("dt", TimestampType(), True),
        StructField("price", FloatType(), True),
        StructField("tickvol", IntegerType(), True),
        StructField("bid", FloatType(), True),
        StructField("ask", FloatType(), True)
    ])

    events_df = spark \
        .readStream \
        .option("sep", "\t") \
        .option("host", sys.argv[1]) \
        .option("port", sys.argv[2]) \
        .format("socket") \
        .schema(tickSchema) \
        .load()

    events_df.printSchema()
    print("columns = ", events_df.columns)

    ohlc_df = events_df \
        .groupby(F.window("dt", "5 minutes", "1 minutes")) \
        .agg(
            F.first('price').alias('open'),
            F.max('price').alias('high'),
            F.min('price').alias('low'),
            F.last('price').alias('close')
        ) \
        .collect()

    query = ohlc_df \
        .writeStream \
        .outputMode("complete") \
        .format("console") \
        .start()

    query.awaitTermination()
The output of the print("columns = ", events_df.columns) is ['value'], and the process fails with the following trace:
pyspark.sql.utils.AnalysisException: "cannot resolve '`dt`' given input columns: [value];;\n'Aggregate [timewindow('dt, 300000000, 60000000, 0)], [timewindow('dt, 300000000, 60000000, 0) AS window#3, first('price, false) AS open#7, max('price) AS high#9, min('price) AS low#11, last('price, false) AS close#13]\n+- StreamingRelation DataSource(org.apache.spark.sql.SparkSession#3a32b1a2,socket,List(),Some(StructType(StructField(symbol,StringType,true), StructField(dt,TimestampType,true), StructField(price,FloatType,true), StructField(tickvol,IntegerType,true), StructField(bid,FloatType,true), StructField(ask,FloatType,true))),List(),None,Map(sep -> \t, host -> localhost, port -> 9999),None), textSocket, [value#0]\n"
Any idea what I'm doing wrong?
Your DataFrame has only one column, value, and here you are trying to access the column dt from events_df. This is the root of the problem.
The statement below clearly shows that it has the single column value:
print("columns = ", events_df.columns)
You need to inspect this:
events_df = spark \
.readStream \
.option("sep", "\t") \
.option("host", sys.argv[1]) \
.option("port", sys.argv[2]) \
.format("socket") \
.schema(tickSchema) \
.load()
It creates a DataFrame with only one column because the socket source does not apply a user-specified schema; each incoming line arrives as a single string column named value, which you have to parse yourself.
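A minimal sketch of that parsing step, assuming tab-separated fields arriving in the order of tickSchema (the column names and casts mirror the question's schema):
from pyspark.sql.functions import split

# The socket source yields a single string column "value": split it and cast each field.
parts = split(events_df["value"], "\t")
parsed_df = events_df.select(
    parts.getItem(0).alias("symbol"),
    parts.getItem(1).cast("timestamp").alias("dt"),
    parts.getItem(2).cast("float").alias("price"),
    parts.getItem(3).cast("int").alias("tickvol"),
    parts.getItem(4).cast("float").alias("bid"),
    parts.getItem(5).cast("float").alias("ask"),
)
# parsed_df now has the intended columns and can feed the windowed aggregation.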

pyspark cannot parse timestamp in json

For example, given the following JSON file (named 'json'):
{"myTime": "2016-10-26 18:19:15"}
and the following python script:
from pyspark import SparkContext
from pyspark import SparkConf
from pyspark.sql import SQLContext
conf = SparkConf().setAppName('simpleTest')
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
print sc.version
json_file = 'json'
df = sqlContext.read.json(json_file,timestampFormat='yyyy-MM-dd HH:mm:ss')
df.printSchema()
The output is:
2.0.2
root
|-- myTime: string (nullable = true)
I expected the schema to show myTime as a timestamp.
What am I missing?
Schema inference treats that value as a plain string (as your printSchema output shows), so you need to define a schema explicitly:
from pyspark.sql.types import StructType, StructField, TimestampType
schema = StructType([StructField("myTime", TimestampType(), True)])
df = spark.read.json(json_file, schema=schema, timestampFormat="yyyy-MM-dd HH:mm:ss")
This will output:
>>> df.collect()
[Row(myTime=datetime.datetime(2016, 10, 26, 18, 19, 15))]
>>> df.printSchema()
root
|-- myTime: timestamp (nullable = true)
>>>
In addition to Dat Tran's solution, you can also apply a cast directly to the DataFrame column after reading the file.
# example
from pyspark.sql import Row
json = [Row(**{"myTime": "2016-10-26 18:19:15"})]
df = spark.sparkContext.parallelize(json).toDF()
# using cast to 'timestamp' format
df_time = df.select(df['myTime'].cast('timestamp'))
df_time.printSchema()
root
|-- myTime: timestamp (nullable = true)
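On newer Spark versions (2.2+), pyspark.sql.functions.to_timestamp with an explicit format is another option; a small sketch reusing the df from the example above:
from pyspark.sql.functions import to_timestamp

# Parse the string column with an explicit pattern instead of a plain cast.
df_time = df.withColumn("myTime", to_timestamp("myTime", "yyyy-MM-dd HH:mm:ss"))
df_time.printSchema()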

AttributeError: module 'pandas' has no attribute 'to_csv'

I took some rows from a csv file like this:
pd.DataFrame(CV_data.take(5), columns=CV_data.columns)
and performed some functions on it. Now I want to save it as csv again, but it gives the error module 'pandas' has no attribute 'to_csv'.
I am trying to save it like this:
pd.to_csv(CV_data, sep='\t', encoding='utf-8')
Here is my full code. How can I save my resulting data as csv or excel?
# Disable warnings, set Matplotlib inline plotting and load Pandas package
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
import pandas as pd
pd.options.display.mpl_style = 'default'

CV_data = sqlContext.read.load('Downloads/data/churn-bigml-80.csv',
                               format='com.databricks.spark.csv',
                               header='true',
                               inferSchema='true')
final_test_data = sqlContext.read.load('Downloads/data/churn-bigml-20.csv',
                                       format='com.databricks.spark.csv',
                                       header='true',
                                       inferSchema='true')
CV_data.cache()
CV_data.printSchema()

pd.DataFrame(CV_data.take(5), columns=CV_data.columns)

from pyspark.sql.types import DoubleType
from pyspark.sql.functions import UserDefinedFunction

binary_map = {'Yes': 1.0, 'No': 0.0, True: 1.0, False: 0.0}
toNum = UserDefinedFunction(lambda k: binary_map[k], DoubleType())

CV_data = CV_data.drop('State').drop('Area code') \
    .drop('Total day charge').drop('Total eve charge') \
    .drop('Total night charge').drop('Total intl charge') \
    .withColumn('Churn', toNum(CV_data['Churn'])) \
    .withColumn('International plan', toNum(CV_data['International plan'])) \
    .withColumn('Voice mail plan', toNum(CV_data['Voice mail plan'])).cache()

final_test_data = final_test_data.drop('State').drop('Area code') \
    .drop('Total day charge').drop('Total eve charge') \
    .drop('Total night charge').drop('Total intl charge') \
    .withColumn('Churn', toNum(final_test_data['Churn'])) \
    .withColumn('International plan', toNum(final_test_data['International plan'])) \
    .withColumn('Voice mail plan', toNum(final_test_data['Voice mail plan'])).cache()

pd.DataFrame(CV_data.take(5), columns=CV_data.columns)

from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.tree import DecisionTree

def labelData(data):
    # label: row[end], features: row[0:end-1]
    return data.map(lambda row: LabeledPoint(row[-1], row[:-1]))

training_data, testing_data = labelData(CV_data).randomSplit([0.8, 0.2])

model = DecisionTree.trainClassifier(training_data, numClasses=2, maxDepth=2,
                                     categoricalFeaturesInfo={1: 2, 2: 2},
                                     impurity='gini', maxBins=32)
print(model.toDebugString())
print('Feature 12:', CV_data.columns[12])
print('Feature 4: ', CV_data.columns[4])

from pyspark.mllib.evaluation import MulticlassMetrics

def getPredictionsLabels(model, test_data):
    predictions = model.predict(test_data.map(lambda r: r.features))
    return predictions.zip(test_data.map(lambda r: r.label))

def printMetrics(predictions_and_labels):
    metrics = MulticlassMetrics(predictions_and_labels)
    print('Precision of True ', metrics.precision(1))
    print('Precision of False', metrics.precision(0))
    print('Recall of True    ', metrics.recall(1))
    print('Recall of False   ', metrics.recall(0))
    print('F-1 Score         ', metrics.fMeasure())
    print('Confusion Matrix\n', metrics.confusionMatrix().toArray())

predictions_and_labels = getPredictionsLabels(model, testing_data)
printMetrics(predictions_and_labels)

CV_data.groupby('Churn').count().toPandas()
stratified_CV_data = CV_data.sampleBy('Churn', fractions={0: 388./2278, 1: 1.0}).cache()
stratified_CV_data.groupby('Churn').count().toPandas()

pd.to_csv(CV_data, sep='\t', encoding='utf-8')
to_csv is a method of a DataFrame object, not of the pandas module.
df = pd.DataFrame(CV_data.take(5), columns=CV_data.columns)
# whatever manipulations on df
df.to_csv(...)
You also have the line pd.DataFrame(CV_data.take(5), columns=CV_data.columns) in your code.
This line creates a DataFrame and then discards it. Even if you were successfully calling to_csv, none of your changes to CV_data would have been reflected in that DataFrame (and therefore in the output csv file).
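A minimal sketch of the fix applied to the code above; the output file names are illustrative:
# Build a pandas DataFrame from the (already transformed) Spark DataFrame and save it.
pdf = pd.DataFrame(CV_data.take(5), columns=CV_data.columns)
pdf.to_csv('CV_data_sample.tsv', sep='\t', encoding='utf-8', index=False)

# Or, if the whole dataset fits in driver memory, convert it all at once:
# CV_data.toPandas().to_csv('CV_data_full.csv', index=False)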
Solution:
You should write df.to_csv instead of pd.to_csv.
Justification:
to_csv is a method of a DataFrame object, while pd is the pandas module itself.
Hence your code was not working and threw the error
AttributeError: module 'pandas' has no attribute 'to_csv'
This will do the job!
#Create a DataFrame:
new_df = pd.DataFrame({'id': [1,2,3,4,5], 'LETTERS': ['A','B','C','D','E'], 'letters': ['a','b','c','d','e']})
#Save it as csv in your folder:
new_df.to_csv('C:\\Users\\You\\Desktop\\new_df.csv')
