I want to get the number of unique connections between locations, so a->b and b->a should count as one. The dataframe contains timestamps and the start and end location names. The result should show the unique connections between stations per day of the year.
import findspark
findspark.init('/home/[user_name]/spark-3.1.2-bin-hadoop3.2')
import pyspark
from pyspark.sql.functions import date_format, countDistinct, collect_list, struct, col
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('cluster1').getOrCreate()
from pyspark.sql.types import StructType,StructField, StringType, IntegerType, DateType, TimestampType
from pyspark.sql.functions import to_timestamp
data2 = [
('2017-12-29 16:57:39.6540','2017-12-29 16:57:39.6540',"A","B"),
("2017-12-29 16:57:39.6540","2017-12-29 17:57:39.6540","B","A"),
("2017-12-29 16:57:39.6540","2017-12-29 19:57:39.6540","B","A"),
("2017-12-30 16:57:39.6540","2017-12-30 16:57:39.6540","C","A"),
("2017-12-30 16:57:39.6540","2017-12-30 17:57:39.6540","B","F"),
("2017-12-31 16:57:39.6540","2017-12-31 16:57:39.6540","C","A"),
("2017-12-31 16:57:39.6540","2017-12-31 17:57:39.6540","A","C"),
("2017-12-31 16:57:39.6540","2017-12-31 17:57:39.6540","B","C"),
("2017-12-31 16:57:39.6540","2017-12-31 17:57:39.6540","A","B"),
]
schema = StructType([ \
StructField("start",StringType(),True), \
StructField("end",StringType(),True), \
StructField("start_loc",StringType(),True), \
StructField("end_loc", StringType(), True)
])
df2 = spark.createDataFrame(data=data2,schema=schema)
df2 = df2.withColumn("start_timestamp",to_timestamp("start"))
df2 = df2.withColumn("end_timestamp",to_timestamp("end"))
df2 = df2.drop("start", "end")
df2.printSchema()
df2.show(truncate=False)
df2_agg = df2.withColumn("date", date_format('start_timestamp', 'D'))\
.groupBy('date', 'start_loc','end_loc').agg(
collect_list(struct(col('start_loc'), col('end_loc'))).alias("n_routes_sets"),
)
df2_agg.show()
The result of the code above is not what I want; it should look like this:
+----+--------+
|date|n_routes|
+----+--------+
| 365|       3|
| 364|       2|
| 363|       1|
+----+--------+
The line below is wrong:
collect_list(struct(col('start_loc'), col('end_loc'))).alias("n_routes_sets"),
Modify your code as shown below, ordering each pair so that (a, b) and (b, a) always end up the same way:
from pyspark.sql.functions import date_format, countDistinct, collect_set, struct, col, when, size
...
...
df2 = df2.withColumn("sl2", when(df2['end_loc'] < df2['start_loc'], df2['end_loc']).otherwise(df2['start_loc']) )
df2 = df2.withColumn("el2", when(df2['end_loc'] > df2['start_loc'], df2['end_loc']).otherwise(df2['start_loc']) )
df2 = df2.drop("start_loc", "end_loc")
df2.printSchema()
df2.show(truncate=False)
df2_agg = df2.withColumn("date", date_format('start_timestamp', 'D'))\
.groupBy('date').agg(collect_set(struct(col('sl2'), col('el2'))).alias("n_routes_sets"),
)
df2_agg.select("date", size("n_routes_sets")).show()
returns:
+----+-------------------+
|date|size(n_routes_sets)|
+----+-------------------+
| 363| 1|
| 364| 2|
| 365| 3|
+----+-------------------+
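As an aside, if you only need the counts, a shorter sketch (assuming the original df2 from the question, with start_loc and end_loc still present) normalizes each pair with least/greatest and counts distinct pairs per day directly:
from pyspark.sql.functions import date_format, least, greatest, countDistinct

# Normalize the pair ordering and count distinct (start, end) pairs per day of year
df2_alt = df2.withColumn("date", date_format("start_timestamp", "D")) \
    .groupBy("date") \
    .agg(countDistinct(least("start_loc", "end_loc"),
                       greatest("start_loc", "end_loc")).alias("n_routes"))
df2_alt.show()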
I have three pyspark dataframes. I want to put the dataframe references in a dictionary, write a loop, perform some operations on these three dataframes, and then save the results for further analysis. But I struggle with the last step. I have two questions:
In my code below, how do I access the results in TRANSFORMS? When I type: print(TRANSFORMS[0])
I only get this cryptic result:
<function __main__.multi_output(Input_table, table_name='ONE')>
Is there a mistake in my code such that the transformations are never made?
How do I modify the function so it saves three datasets like df_1_result, df_2_result, df_3_result which I can then later use in further analysis?
import pyspark
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
spark = SparkSession.builder.appName('Sparky').getOrCreate()
# Create the initial dataframe
data = [("James","M",60000),("Michael","M",70000),
("Robert",None,400000),("Maria","F",500000),
("Jen","",None)]
columns = ["name","gender","salary"]
df_when = spark.createDataFrame(data = data, schema = columns)
# Create three identical datasets
df_1 = df_when
df_2 = df_when
df_3 = df_when
TRANSFORMS = []
DATASETS = {
"ONE" : df_1,
"TWO" : df_2,
"THREE" : df_3,
}
for table_name, table_location in list(DATASETS.items()):
def multi_output(Input_table, table_name=table_name):
if table_name=="ONE":
output_table = Input_table.drop("name")
elif table_name=="TWO":
output_table== Input_table.drop("gender")
elif table_name=="THREE":
output_table = Input_table.drop("salary")
return output_table
TRANSFORMS.append(multi_output)
There are a couple of issues here:
Issue 1: TRANSFORMS.append(multi_output) simply adds the function definition to the TRANSFORMS list. The function is never invoked. Also, we should define it outside the for-loop.
Issue 2: The statement under the second condition has a typo.
The code below should work as expected.
import pyspark
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
spark = SparkSession.builder.appName('Sparky').getOrCreate()
# Create the initial dataframe
data = [("James","M",60000),("Michael","M",70000),
("Robert",None,400000),("Maria","F",500000),
("Jen","",None)]
columns = ["name","gender","salary"]
df_when = spark.createDataFrame(data = data, schema = columns)
# Create three identical datasets
df_1 = df_when
df_2 = df_when
df_3 = df_when
TRANSFORMS = []
DATASETS = {
"ONE" : df_1,
"TWO" : df_2,
"THREE" : df_3,
}
def multi_output(Input_table, table_name):
output_table = Input_table
if table_name=="ONE":
output_table = Input_table.drop("name")
elif table_name=="TWO":
output_table= Input_table.drop("gender")
elif table_name=="THREE":
output_table = Input_table.drop("salary")
return output_table
for table_name, table_location in list(DATASETS.items()):
TRANSFORMS.append(multi_output(table_location,table_name))
len(TRANSFORMS)
TRANSFORMS[0].show()
TRANSFORMS[1].show()
TRANSFORMS[2].show()
+------+------+
|gender|salary|
+------+------+
| M| 60000|
| M| 70000|
| null|400000|
| F|500000|
| | null|
+------+------+
+-------+------+
| name|salary|
+-------+------+
| James| 60000|
|Michael| 70000|
| Robert|400000|
| Maria|500000|
| Jen| null|
+-------+------+
+-------+------+
| name|gender|
+-------+------+
| James| M|
|Michael| M|
| Robert| null|
| Maria| F|
| Jen| |
+-------+------+
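If you would rather end up with named results such as df_1_result for further analysis, one small variation (using the same multi_output and DATASETS as above) is to keep the outputs in a dictionary keyed by table name:
# Store each transformed dataframe under a descriptive key instead of a list index
RESULTS = {name: multi_output(df, name) for name, df in DATASETS.items()}

df_1_result = RESULTS["ONE"]    # same result as TRANSFORMS[0]
df_2_result = RESULTS["TWO"]
df_3_result = RESULTS["THREE"]
df_1_result.show()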
I have a Spark DataFrame (json_df) and I need to create another DataFrame based on the nested JSON:
This is my current Dataframe:
I know I could do it manually, like final_df = json_df.select(col("Body.EquipmentId"), .....), but I want to do it in a generic way.
Note: for this specific DF, the JSON records have the same structure.
Any idea?
Thanks!
Programmatically, you can do it like this:
from pyspark.sql import *
from pyspark.sql.functions import *
from pyspark import SparkContext, SparkConf
from pyspark.sql import functions as F
conf = SparkConf()
sc = SparkContext(conf=conf)
spark = SparkSession(sc)
df = sc.parallelize([({"A":1, "B":2},), ({"A":3,"B":4},), ({"A":5,"B":6},)]).toDF(['Body'])
keys_df = df.select(F.explode(F.map_keys(F.col('Body')))).distinct()
keys = list(map(lambda row: row[0], keys_df.collect()))
key_cols = list(map(lambda f: F.col("Body").getItem(f).alias(str(f)), keys))
final_cols = df.select(key_cols)
final_cols.show()
Which produces
+---+---+
| B| A|
+---+---+
| 2| 1|
| 4| 3|
| 6| 5|
+---+---+
If you have the entire list of keys already, you can skip the part where it gets the keys and just set the keys manually:
keys = ['A', 'B']
Source: https://mungingdata.com/pyspark/dict-map-to-multiple-columns/
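Note that the example above uses a MapType column. If your Body column is a StructType, which is typical for nested JSON with a fixed schema as in the question, a sketch of the same idea (assuming the question's json_df) is even shorter:
import pyspark.sql.functions as F

# Expand every nested field of the struct into a top-level column
final_df = json_df.select("Body.*")

# Or build the column list from the schema, e.g. if you want to rename or filter fields
nested_fields = [f.name for f in json_df.schema["Body"].dataType.fields]
final_df = json_df.select([F.col("Body." + name).alias(name) for name in nested_fields])
final_df.show()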
from pyspark.sql.functions import *
data = [("1","2019-07-01","2019-02-03"),("2","2019-06-24","2019-03-21"),("3","2019-08-24","2020-08-24")]
df=spark.createDataFrame(data=data,schema=["id","date1",'date2'])
df.show()
Expected Output
I tried with the code below:
from pyspark.sql.functions import udf
import pyspark.sql.functions as sf
def get_datediff(vec):
d1=vec[0];d2=vec[1]
rt=datediff(d1,d2)
return(rt)
df = df.withColumn('date_diff1', sf.udf(get_datediff)(array('date1','date2')))
df.show()
But I am getting an error and am unable to get the date diff.
If you're using Spark SQL functions, you don't need to define a UDF. Just call the function directly, e.g.
import pyspark.sql.functions as F
data = [("1","2019-07-01","2019-02-03"),("2","2019-06-24","2019-03-21"),("3","2019-08-24","2020-08-24")]
df = spark.createDataFrame(data=data,schema=["id","date1",'date2'])
df2 = df.withColumn('date_diff1', F.datediff('date1','date2'))
df2.show()
+---+----------+----------+----------+
| id| date1| date2|date_diff1|
+---+----------+----------+----------+
| 1|2019-07-01|2019-02-03| 148|
| 2|2019-06-24|2019-03-21| 95|
| 3|2019-08-24|2020-08-24| -366|
+---+----------+----------+----------+
If you insist on using a UDF, you can do this:
import pyspark.sql.functions as F
from datetime import datetime
data = [("1","2019-07-01","2019-02-03"),("2","2019-06-24","2019-03-21"),("3","2019-08-24","2020-08-24")]
df = spark.createDataFrame(data=data,schema=["id","date1",'date2'])
@F.udf('int')
def datediff_udf(d1, d2):
d1 = datetime.strptime(d1, "%Y-%m-%d")
d2 = datetime.strptime(d2, "%Y-%m-%d")
return (d1 - d2).days
df2 = df.withColumn('date_diff1', datediff_udf('date1', 'date2'))
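With the decorator applied, the UDF version produces the same date_diff1 values (148, 95 and -366) as the built-in datediff above:
df2.show()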
I have two dataframes to compare. Both have the same number of columns, and the comparison result should contain the mismatching field, the actual and expected values, and the ID.
Dataframe one
+-----+---+--------+
| name| id| City|
+-----+---+--------+
| Sam| 3| Toronto|
| BALU| 11| YYY|
|CLAIR| 7|Montreal|
|HELEN| 10| London|
|HELEN| 16| Ottawa|
+-----+---+--------+
Dataframe two
+-------------+-----------+-------------+
|Expected_name|Expected_id|Expected_City|
+-------------+-----------+-------------+
| SAM| 3| Toronto|
| BALU| 11| YYY|
| CLARE| 7| Montreal|
| HELEN| 10| Londn|
| HELEN| 15| Ottawa|
+-------------+-----------+-------------+
Expected Output
+---+------------+--------------+-----+
| ID|Actual_value|Expected_value|Field|
+---+------------+--------------+-----+
| 7| CLAIR| CLARE| name|
| 3| Sam| SAM| name|
| 10| London| Londn| City|
+---+------------+--------------+-----+
Code
Create example data
from pyspark.sql import SQLContext
from pyspark.context import SparkContext
from pyspark.sql.functions import *
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
from pyspark.sql import SparkSession
sc = SparkContext()
sql_context = SQLContext(sc)
spark = SparkSession.builder.getOrCreate()
spark.sparkContext.setLogLevel("ERROR")  # only log errors
df_Actual = sql_context.createDataFrame(
[("Sam", 3,'Toronto'), ("BALU", 11,'YYY'), ("CLAIR", 7,'Montreal'),
("HELEN", 10,'London'), ("HELEN", 16,'Ottawa')],
["name", "id","City"]
)
df_Expected = sql_context.createDataFrame(
[("SAM", 3,'Toronto'), ("BALU", 11,'YYY'), ("CLARE", 7,'Montreal'),
("HELEN", 10,'Londn'), ("HELEN", 15,'Ottawa')],
["Expected_name", "Expected_id","Expected_City"]
)
Create empty dataframe for Result
field = [
StructField("ID",StringType(), True),
StructField("Actual_value", StringType(), True),
StructField("Expected_value", StringType(), True),
StructField("Field", StringType(), True)
]
schema = StructType(field)
Df_Result = sql_context.createDataFrame(sc.emptyRDD(), schema)
Join expected and actual on IDs
df_combined = df_Actual.join(df_Expected, (df_Actual.id == df_Expected.Expected_id))
col_names=df_Actual.schema.names
Loop through each column to find mismatches
for col_name in col_names:
#Filter for column values not matching
    df_comp = df_combined.filter(col(col_name) != col("Expected_" + col_name))\
.select(col('id'),col(col_name),col("Expected_"+col_name ))
#Add not matching column name
df_comp = df_comp.withColumn("Field", lit(col_name))
#Add to final result
Df_Result = Df_Result.union(df_comp)
Df_Result.show()
This code works as expected. However, in the real case I have more columns and millions of rows to compare, and this approach takes a long time to finish. Is there a better way to improve the performance and get the same result?
One way to avoid doing the union is the following:
Create a list of columns to compare: to_compare
Next select the id column and use pyspark.sql.functions.when to compare the columns. For those with a mismatch, build an array of structs with 3 fields: (Actual_value, Expected_value, Field) for each column in to_compare
Explode the temp array column and drop the nulls
Finally select the id and use col.* to expand the values from the struct into columns.
Code (the struct holds each mismatched field's actual value, expected value, and column name):
import pyspark.sql.functions as f
# these are the fields you want to compare
to_compare = [c for c in df_Actual.columns if c != "id"]
df_new = df_combined.select(
"id",
f.array([
f.when(
f.col(c) != f.col("Expected_"+c),
f.struct(
f.col(c).alias("Actual_value"),
f.col("Expected_"+c).alias("Expected_value"),
f.lit(c).alias("Field")
)
).alias(c)
for c in to_compare
]).alias("temp")
)\
.select("id", f.explode("temp"))\
.dropna()\
.select("id", "col.*")
df_new.show()
#+---+------------+--------------+-----+
#| id|Actual_value|Expected_value|Field|
#+---+------------+--------------+-----+
#| 7| CLAIR| CLARE| name|
#| 10| London| Londn| City|
#| 3| Sam| SAM| name|
#+---+------------+--------------+-----+
Join only those records where the expected id equals the actual id and there is a mismatch in any other column:
df1.join(df2, (df1.id == df2.id) & ((df1.name != df2.name) | (df1.age != df2.age) | ...))
This way you only loop over the mismatched rows instead of the whole dataset.
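A concrete sketch of that idea, built programmatically against the question's df_Actual and df_Expected (with their Expected_-prefixed columns), could look like this:
from functools import reduce
import pyspark.sql.functions as F

# OR together "column differs from its Expected_ counterpart" for every compared column,
# then join only the rows where the ids match and at least one column mismatches
to_compare = [c for c in df_Actual.columns if c != "id"]
any_mismatch = reduce(lambda a, b: a | b,
                      [F.col(c) != F.col("Expected_" + c) for c in to_compare])

df_mismatched = df_Actual.join(
    df_Expected,
    (df_Actual.id == df_Expected.Expected_id) & any_mismatch
)
df_mismatched.show()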
For those who are looking for an answer: I transposed the dataframes and then did the comparison.
from pyspark.sql.functions import array, col, explode, struct, lit
def Transposedf(df, by,colheader):
# Filter dtypes and split into column names and type description
cols, dtypes = zip(*((c, t) for (c, t) in df.dtypes if c not in by))
# Spark SQL supports only homogeneous columns
assert len(set(dtypes)) == 1, "All columns have to be of the same type"
# Create and explode an array of (column_name, column_value) structs
kvs = explode(array([ struct(lit(c).alias("Field"), col(c).alias(colheader)) for c in cols ])).alias("kvs")
return df.select(by + [kvs]).select(by + ["kvs.Field", "kvs."+colheader])
Then the comparison looks like this
def Compare_df(df_Expected,df_Actual):
df_combined = (df_Actual
.join(df_Expected, ((df_Actual.id == df_Expected.id)
& (df_Actual.Field == df_Expected.Field)
& (df_Actual.Actual_value != df_Expected.Expected_value)))
        .select([df_Actual.id, df_Actual.Field, df_Actual.Actual_value, df_Expected.Expected_value])
)
return df_combined
I called these two functions as follows (renaming the expected columns first so that the field names line up with the actual dataframe):
df_Expected = df_Expected.selectExpr("Expected_id as id", "Expected_name as name", "Expected_City as City")
df_Actual = Transposedf(df_Actual, ["id"], 'Actual_value')
df_Expected = Transposedf(df_Expected, ["id"], 'Expected_value')
#Compare the expected and actual
df_result=Compare_df(df_Expected,df_Actual)
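Calling df_result.show() then lists one row per mismatched field with its actual and expected values:
df_result.show()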