Dot product in pyspark dataframes with MLLIB - python

I have a very simple dataframe in pyspark, something like this:
from pyspark.sql import Row
from pyspark.mllib.linalg import DenseVector
row = Row("a", "b")
df_offers = spark.sparkContext.parallelize([
    row(DenseVector([1, 1, 1]), DenseVector([1, 0, 0])),
]).toDF()
and I would like to compute the dot product of these vectors without resorting to a UDF call.
The Spark MLlib documentation references a dot method on DenseVector, but if I try to apply it as follows:
df_offers = df_offers.withColumn("c", col("a").dot(col("b")))
I get errors like:
TypeError: 'Column' object is not callable
Does anyone know if these MLlib methods can be called on DataFrame objects?

Here, you're applying the dot method on a column and not on a DenseVector, which indeed does not work:
df_offers = df_offers.withColumn("c", col("a").dot(col("b")))
You will have to use a UDF:
from pyspark.sql.functions import udf, array
from pyspark.sql.types import DoubleType
def dot_fun(array):
    # cast to a plain Python float so the DoubleType UDF serializes it correctly
    return float(array[0].dot(array[1]))
dot_udf = udf(dot_fun, DoubleType())
df_offers = df_offers.withColumn("c", dot_udf(array('a', 'b')))

They are not. You'll have to use a UDF:
from pyspark.sql.functions import udf
#udf("double")
def dot(x, y):
if x is not None and y is not None:
return float(x.dot(y))
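A minimal usage sketch of this decorated UDF, assuming the df_offers frame from the question:
df_offers.withColumn("c", dot("a", "b")).show()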

You can compute this without a UDF by first converting the vector columns into a BlockMatrix and multiplying them, like the example below:
from pyspark.mllib.linalg.distributed import IndexedRow, IndexedRowMatrix
ac = df_offers.select('a')
bc = df_offers.select('b')
# attach a row index so each vector becomes an IndexedRow(index, vector)
mata = IndexedRowMatrix(ac.rdd.zipWithIndex().map(lambda x: IndexedRow(x[1], x[0][0])))
matb = IndexedRowMatrix(bc.rdd.zipWithIndex().map(lambda x: IndexedRow(x[1], x[0][0])))
ma = mata.toBlockMatrix(100,100)
mb = matb.toBlockMatrix(100,100)
ans = ma.multiply(mb.transpose())
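Since each side here is a single vector, ma is 1x3 and mb.transpose() is 3x1, so ans is a 1x1 distributed matrix whose only entry is the dot product. A hedged sketch of pulling that scalar back to the driver (only sensible when the result is small):
local = ans.toLocalMatrix().toArray()
dot_value = local[0][0]  # 1.0 for the vectors in the question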

This is a hack, but it might be more performant than a Python UDF. You could just convert the dot product into a SQL expression:
import pandas as pd
from pyspark.sql.functions import expr
coefs = pd.Series({'a': 1.0, 'b': 2.0})
dot_sql = ' + '.join(
    '{} * {}'.format(coef, colname)
    for colname, coef
    in coefs.items()
)
dot_expr = expr(dot_sql)
df.withColumn('dot_product', dot_expr)
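Note that this builds an expression over ordinary numeric columns, so it assumes a and b hold scalar doubles rather than the vector columns from the question. With the coefficients above, the generated SQL is just a weighted sum; a quick illustration with a toy frame (the column values are made up):
print(dot_sql)  # 1.0 * a + 2.0 * b
df_scalar = spark.createDataFrame([(1.0, 2.0), (3.0, 4.0)], ["a", "b"])
df_scalar.withColumn("dot_product", dot_expr).show()
## dot_product is 5.0 and 11.0 for the two rows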

Related

error of finding max value index from pyspark dataframe vector column

I would like to find the index of the max value in a vector column of a Spark dataframe using pyspark.
My Spark version is 3.0.0.
The df:
id    val (vector, nullable = true)
516   0: 1  1: 10  2: []  3: [0.162, 0.511, 0.022, ....]
Is this a sparse vector?
How do I access the array?
[0.162, 0.511, 0.022, ....]
Based on How to find the index of the maximum value in a vector column?, How to get the index of the highest value in a list per row in a Spark DataFrame? [PySpark], and How to find the argmax of a vector in PySpark ML,
it looks like a dense vector?
My code:
import pyspark.sql.functions as F
from pyspark.ml.functions import vector_to_array
from pyspark.sql.types import IntegerType
def max_index(a_col):
    if not a_col:
        return a_col
    if isinstance(a_col, SparseVector):
        a_col = DenseVector(a_col)
    a_col = vector_to_array(a_col)
    return np.argmax(a_col)
my_f = F.udf(max_index, IntegerType())
t = df.withColumn("max_index_col", my_f("val"))  # this returned a None type because "max_index" did not work
t.show()
error:
AttributeError: 'NoneType' object has no attribute '_jvm'
I have tried all the solutions mentioned in the above links, but none of them work.
Did I miss something?
Thanks
UPDATE, I also tried:
vec_to_array = F.udf(lambda v: v.toArray().tolist(), ArrayType(FloatType()))
def find_max_index(v):
    return F.array_position(v, F.array_max(v))
t = df.withColumn("array_col", vec_to_array(F.col("features")))
t.withColumn("max_index", find_max_index(F.col("array_col"))).show(truncate=False)
the same error.
For Spark >= 3.0.0, vector_to_array can be used to transform the vector into an array. Then the index of the maximum value can be found with a SQL expression:
from pyspark.ml.functions import vector_to_array
df.withColumn("array", vector_to_array("vector")) \
.withColumn("max_index_col", F.expr("array_position(array,array_max(array))")) \
.drop("array") \
.show()
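A minimal, self-contained sketch of that approach (the sample data is illustrative); note that array_position is 1-based, so subtract 1 if you need a 0-based index:
import pyspark.sql.functions as F
from pyspark.ml.functions import vector_to_array
from pyspark.ml.linalg import Vectors
df = spark.createDataFrame([(516, Vectors.dense([0.162, 0.511, 0.022]))], ["id", "val"])
df.withColumn("array", vector_to_array("val")) \
  .withColumn("max_index_col", F.expr("array_position(array, array_max(array))")) \
  .drop("array") \
  .show()
## max_index_col is 2 here, i.e. the 1-based position of 0.511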

Dataframe with arrays and key-pairs

I have a JSON structure which I need to convert into a data-frame. I have converted it through the pandas library, but I am having issues with two columns: one is an array and the other one is a key-value pair.
Pito                     Value
{"pito-key": "Number"}   [{"WRITESTAMP": "2018-06-28T16:30:36Z", "S":"41bbc22","VALUE":"2"}]
How do I break these columns out into the data-frame?
As far as I understand your question, you can apply regular expressions to do that.
import pandas as pd
import re
data = {'pito':['{"pito-key": "Number"}'], 'value':['[{"WRITESTAMP": "2018-06-28T16:30:36Z", "S":"41bbc22","VALUE":"2"}]']}
df = pd.DataFrame(data)
def get_value(s):
    s = s[1]
    v = re.findall(r'VALUE\":\".*\"', s)
    return int(v[0][8:-1])
def get_pito(s):
    s = s[0]
    v = re.findall(r'key\": \".*\"', s)
    return v[0][7:-1]
df['value'] = df.apply(get_value, axis=1)
df['pito'] = df.apply(get_pito, axis=1)
df.head()
Here I create two functions that extract the values you want from those strings.
Let me know if that's not what you meant.
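If the columns really contain valid JSON (as the sample suggests), parsing them is less brittle than regular expressions; a rough sketch of that alternative, using the same made-up data:
import json
import pandas as pd
data = {'pito': ['{"pito-key": "Number"}'],
        'value': ['[{"WRITESTAMP": "2018-06-28T16:30:36Z", "S":"41bbc22","VALUE":"2"}]']}
df = pd.DataFrame(data)
# parse the JSON strings and pull out just the fields of interest
df['pito'] = df['pito'].apply(lambda s: json.loads(s)['pito-key'])
df['value'] = df['value'].apply(lambda s: int(json.loads(s)[0]['VALUE']))
df.head()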

How to use pandas UDF in pyspark and return result in StructType

How can I derive a column based on a pandas UDF in pyspark? I've written the UDF as below:
from pyspark.sql.functions import pandas_udf, PandasUDFType
#pandas_udf("in_type string, in_var string, in_numer int", PandasUDFType.GROUPED_MAP)
def getSplitOP(in_data):
if in_data is None or len(in_data) < 1:
return None
#Input/variable.12-2017
splt=in_data.split("/",1)
in_type=splt[0]
splt_1=splt[1].split(".",1)
in_var = splt_1[0]
splt_2=splt_1[1].split("-",1)
in_numer=int(splt_2[0])
return (in_type, in_var, in_numer)
#Expected output: ("input", "variable", 12)
df = df.withColumn("splt_col", getSplitOP(df.In_data))
Can someone help me identify what's wrong with the above code and why it's not working?
This will work:
df = spark.createDataFrame([("input/variable.12-2017",), ("output/invariable.11-2018",)], ("in_data",))
df.show()
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf("in_type string, in_var string, in_numer int", PandasUDFType.GROUPED_MAP)
def getSplitOP(pdf):
    in_data = pdf.in_data
    # Input/variable.12-2017
    splt = in_data.apply(lambda x: x.split("/", 1))
    in_type = splt.apply(lambda x: x[0])
    splt_1 = splt.apply(lambda x: x[1].split(".", 1))
    in_var = splt_1.apply(lambda x: x[0])
    splt_2 = splt_1.apply(lambda x: x[1].split("-", 1))
    in_numer = splt_2.apply(lambda x: int(x[0]))
    return pd.DataFrame({"in_type": in_type, "in_var": in_var, "in_numer": in_numer})
# Expected output: ("input", "variable", 12)
df = df.groupBy().apply(getSplitOP)
df.show()
There must not be a blank line after the @pandas_udf decorator.
pandas Series objects don't directly support string functions such as split. Use apply to operate elementwise on each Series.
You used a GROUPED_MAP in order to return multiple columns, but your code isn't inherently grouped by anything. Note that groupBy is used without any arguments here. This requires all the data to fit on a single processor.
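As a side note, on Spark 3.x the same grouped-map logic is usually expressed with applyInPandas, where getSplitOP is a plain function (no decorator) and the output schema is passed explicitly; a sketch under that assumption:
# getSplitOP here is the undecorated function taking and returning a pandas DataFrame
df = df.groupBy().applyInPandas(
    getSplitOP, schema="in_type string, in_var string, in_numer int"
)
df.show()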

how to get elements from a probability Column prediction in a pyspark model [duplicate]

I have a dataframe df with a VectorUDT column named features. How do I get an element of the column, say first element?
I've tried doing the following
from pyspark.sql.functions import udf
first_elem_udf = udf(lambda row: row.values[0])
df.select(first_elem_udf(df.features)).show()
but I get a net.razorvine.pickle.PickleException: expected zero arguments for construction of ClassDict (for numpy.dtype) error. Same error if I do first_elem_udf = udf(lambda row: row.toArray()[0]) instead.
I also tried explode() but I get an error because it requires an array or map type.
This should be a common operation, I think.
Convert output to float:
from pyspark.sql.types import DoubleType
from pyspark.sql.functions import lit, udf
def ith_(v, i):
    try:
        return float(v[i])
    except ValueError:
        return None
ith = udf(ith_, DoubleType())
Example usage:
from pyspark.ml.linalg import Vectors
df = sc.parallelize([
(1, Vectors.dense([1, 2, 3])),
(2, Vectors.sparse(3, [1], [9]))
]).toDF(["id", "features"])
df.select(ith("features", lit(1))).show()
## +-----------------+
## |ith_(features, 1)|
## +-----------------+
## | 2.0|
## | 9.0|
## +-----------------+
Explanation:
Output values have to be reserialized to equivalent Java objects. If you want to access values (beware of SparseVectors), you should use the item method:
v.values.item(0)
which returns a standard Python scalar. Similarly, if you want to access all values as a dense structure:
v.toArray().tolist()
If you prefer using spark.sql, you can use the following custom function 'to_array' to convert the vector to an array. Then you can manipulate it as an array.
from pyspark.sql.types import ArrayType, DoubleType
def to_array_(v):
    return v.toArray().tolist()
from pyspark.sql import SQLContext
sqlContext=SQLContext(spark.sparkContext, sparkSession=spark, jsqlContext=None)
sqlContext.udf.register("to_array",to_array_, ArrayType(DoubleType()))
Example:
from pyspark.ml.linalg import Vectors
df = sc.parallelize([
(1, Vectors.dense([1, 2, 3])),
(2, Vectors.sparse(3, [1], [9]))
]).toDF(["id", "features"])
df.createOrReplaceTempView("tb")
spark.sql("""select * , to_array(features)[1] Second from tb """).toPandas()
Output:
   id  features         Second
0   1  [1.0, 2.0, 3.0]     2.0
1   2  (0.0, 9.0, 0.0)     9.0
I ran into the same problem with not being able to use explode(). One thing you can do is use VectorSlicer from the pyspark.ml.feature library. Like so:
from pyspark.ml.feature import VectorSlicer
from pyspark.ml.linalg import Vectors
from pyspark.sql.types import Row
slicer = VectorSlicer(inputCol="features", outputCol="features_one", indices=[0])
output = slicer.transform(df)
output.select("features", "features_one").show()
For anyone trying to split the probability column generated after training a PySpark ML model into usable columns: this does not use a UDF or numpy, and it will only work for binary classification. Here lr_pred is the dataframe which has the predictions from the logistic regression model.
from pyspark.sql.functions import split, regexp_replace
from pyspark.sql.types import DoubleType
prob_df1 = lr_pred.withColumn("probability", lr_pred["probability"].cast("String"))
prob_df = prob_df1.withColumn('probabilityre', split(regexp_replace("probability", r"^\[|\]", ""), ",")[1].cast(DoubleType()))
Since Spark 3.0.0 this can be done without using a UDF.
from pyspark.ml.functions import vector_to_array
https://discuss.dizzycoding.com/how-to-split-vector-into-columns-using-pyspark/
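A short sketch of that UDF-free route for the original question, assuming the df with a features vector column from the examples above:
from pyspark.ml.functions import vector_to_array
# vector_to_array gives an array<double> column; index it (0-based) for a single element
df.withColumn("first_elem", vector_to_array("features").getItem(0)).show()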
Why is Vector[Double] used in the results? That's not a very nice data type.

Create Spark DataFrame. Can not infer schema for type

Could someone help me solve this problem I have with Spark DataFrame?
When I do myFloatRDD.toDF() I get an error:
TypeError: Can not infer schema for type: type 'float'
I don't understand why...
Example:
myFloatRdd = sc.parallelize([1.0,2.0,3.0])
df = myFloatRdd.toDF()
Thanks
SparkSession.createDataFrame, which is used under the hood, requires an RDD / list of Row/tuple/list/dict* or pandas.DataFrame, unless schema with DataType is provided. Try to convert float to tuple like this:
myFloatRdd.map(lambda x: (x, )).toDF()
or even better:
from pyspark.sql import Row
row = Row("val") # Or some other column name
myFloatRdd.map(row).toDF()
To create a DataFrame from a list of scalars you'll have to use SparkSession.createDataFrame directly and provide a schema***:
from pyspark.sql.types import FloatType
df = spark.createDataFrame([1.0, 2.0, 3.0], FloatType())
df.show()
## +-----+
## |value|
## +-----+
## | 1.0|
## | 2.0|
## | 3.0|
## +-----+
but for a simple range it would be better to use SparkSession.range:
from pyspark.sql.functions import col
spark.range(1, 4).select(col("id").cast("double"))
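For comparison, a quick look at what that range-based variant produces (the alias is added here only so the column name matches the earlier output):
spark.range(1, 4).select(col("id").cast("double").alias("value")).show()
## +-----+
## |value|
## +-----+
## |  1.0|
## |  2.0|
## |  3.0|
## +-----+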
* No longer supported.
** Spark SQL also provides a limited support for schema inference on Python objects exposing __dict__.
*** Supported only in Spark 2.0 or later.
from pyspark.sql.types import IntegerType, Row
mylist = [1, 2, 3, 4, None]
l = map(lambda x: Row(x), mylist)
# notice the parens after the type name
df = spark.createDataFrame(l, ["id"])
df.where(df.id.isNull() == False).show()
Basically, you need to wrap your values in Row() first; then you can apply the schema.
Inferring the Schema Using Reflection
from pyspark.sql import Row
# spark - sparkSession
sc = spark.sparkContext
# Load a text file and convert each line to a Row.
orders = sc.textFile("/practicedata/orders")
#Split on delimiters
parts = orders.map(lambda l: l.split(","))
#Convert to Row
orders_struct = parts.map(lambda p: Row(order_id=int(p[0]), order_date=p[1], customer_id=p[2], order_status=p[3]))
for i in orders_struct.take(5): print(i)
#convert the RDD to DataFrame
orders_df = spark.createDataFrame(orders_struct)
Programmatically Specifying the Schema
from pyspark.sql import Row
from pyspark.sql.types import StructType, StructField, StringType
# spark - sparkSession
sc = spark.sparkContext
# Load a text file and convert each line to a Row.
orders = sc.textFile("/practicedata/orders")
#Split on delimiters
parts = orders.map(lambda l: l.split(","))
#Convert to tuple
orders_struct = parts.map(lambda p: (p[0], p[1], p[2], p[3].strip()))
#convert the RDD to DataFrame
orders_df = spark.createDataFrame(orders_struct)
# The schema is encoded in a string.
schemaString = "order_id order_date customer_id status"
fields = [StructField(field_name, StringType(), True) for field_name in schemaString.split()]
schema = StructType(fields)
ordersDf = spark.createDataFrame(orders_struct, schema)
from pyspark.sql import Row
myFloatRdd.map(lambda x: Row(x)).toDF()
