I want to validate a date column for a PySpark dataframe. I know how to do it for pandas, but can't make it work for PySpark.
import pandas as pd
from datetime import datetime
data = [['Alex',10, '2001-01-12'],['Bob',12, '2005-10-21'],['Clarke',13, '2003-12-41']]
df = pd.DataFrame(data,columns=['Name','Sale_qty', 'DOB'])
sparkDF =spark.createDataFrame(df)
def validate(date_text):
    try:
        if date_text != datetime.strptime(date_text, "%Y-%m-%d").strftime('%Y-%m-%d'):
            raise ValueError
        return True
    except ValueError:
        return False
df = df['DOB'].apply(lambda x: validate(x))
print(df)
This works for the pandas DataFrame, but I can't make it work for PySpark. I get the following error:
sparkDF = sparkDF['DOB'].apply(lambda x: validate(x))
TypeError Traceback (most recent call last)
<ipython-input-83-5f5f1db1c7b3> in <module>
----> 1 sparkDF = sparkDF['DOB'].apply(lambda x: validate(x))
TypeError: 'Column' object is not callable
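For reference, the per-row validate function can also be made to work in PySpark by wrapping it in a UDF instead of calling .apply on a Column (a minimal sketch reusing validate and sparkDF from above; the answers below show a built-in alternative that avoids a UDF entirely):
from pyspark.sql import functions as F
from pyspark.sql.types import BooleanType

# Wrap the row-level validate() in a UDF so it can be applied to a Column
validate_udf = F.udf(validate, BooleanType())
sparkDF.withColumn('DOB_valid', validate_udf('DOB')).show()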
You could use the following column expression:
F.to_date('DOB', 'yyyy-M-d').isNotNull()
Full test:
from pyspark.sql import functions as F
data = [['Alex', 10, '2001-01-12'], ['Bob', 12, '2005'], ['Clarke', 13, '2003-12-41']]
df = spark.createDataFrame(data, ['Name', 'Sale_qty', 'DOB'])
validation = F.to_date('DOB', 'yyyy-M-d').isNotNull()
df.withColumn('validation', validation).show()
# +------+--------+----------+----------+
# | Name|Sale_qty| DOB|validation|
# +------+--------+----------+----------+
# | Alex| 10|2001-01-12| true|
# | Bob| 12| 2005| false|
# |Clarke| 13|2003-12-41| false|
# +------+--------+----------+----------+
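If you only want to inspect or drop the rows that fail validation, the same expression can be reused as a filter; a small sketch using the df and F from the test above:
# Rows whose DOB does not parse with the expected format
df.filter(F.to_date('DOB', 'yyyy-M-d').isNull()).show()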
You can use to_date() with the required source date format. It returns null where the format is incorrect, which can be used for validation. See the example below.
from pyspark.sql import functions as func

spark.sparkContext.parallelize([('01-12-2001',), ('2001-01-12',)]).toDF(['dob']). \
    withColumn('correct_date_format', func.to_date('dob', 'yyyy-MM-dd').isNotNull()). \
    show()
# +----------+-------------------+
# | dob|correct_date_format|
# +----------+-------------------+
# |01-12-2001| false|
# |2001-01-12| true|
# +----------+-------------------+
I am trying to format the string in one of the columns using a PySpark UDF.
Below is my dataset:
+--------------------+--------------------+
| artists| id|
+--------------------+--------------------+
| ['Mamie Smith']|0cS0A1fUEUd1EW3Fc...|
|"[""Screamin' Jay...|0hbkKFIJm7Z05H8Zl...|
| ['Mamie Smith']|11m7laMUgmOKqI3oY...|
| ['Oscar Velazquez']|19Lc5SfJJ5O1oaxY0...|
| ['Mixe']|2hJjbsLCytGsnAHfd...|
|['Mamie Smith & H...|3HnrHGLE9u2MjHtdo...|
| ['Mamie Smith']|5DlCyqLyX2AOVDTjj...|
|['Mamie Smith & H...|02FzJbHtqElixxCmr...|
|['Francisco Canaro']|02i59gYdjlhBmbbWh...|
| ['Meetya']|06NUxS2XL3efRh0bl...|
| ['Dorville']|07jrRR1CUUoPb1FLf...|
|['Francisco Canaro']|0ANuF7SvPeIHanGcC...|
| ['Ka Koula']|0BEO6nHi1rmTOPiEZ...|
| ['Justrock']|0DH1IROKoPK5XTglU...|
| ['Takis Nikolaou']|0HVjPaxbyfFcg8Rh0...|
|['Aggeliki Karagi...|0Hn7LWy1YcKhPaA2N...|
|['Giorgos Katsaros']|0I6DjrEfd3fKFESHE...|
|['Francisco Canaro']|0KGiP9EW1xtojDHsT...|
|['Giorgos Katsaros']|0KNI2d7l3ByVHU0g2...|
| ['Amalia Vaka']|0LYNwxHYHPW256lO2...|
+--------------------+--------------------+
And code:
from pyspark.sql import SparkSession
import pyspark.sql.functions as f
import pyspark.sql.types as t
import logging as log
session = SparkSession.builder.master("local").appName("First Python App").getOrCreate()
df = session.read.option("header", "true").csv("/home/deepak/Downloads/spotify_data_Set/data.csv")
df = df.select("artists", "id")
# df = df.withColumn("new_atr",f.translate(f.col("artists"),'"', "")).\
# withColumn("new_atr_2" , f.translate(f.col("artists"),'[', ""))
df.show()
def format_column(st):
    print(type(st))
    print(1)
    return st.upper()
session.udf.register("format_str", format_column)
df.select("id",format_column(df.artists)).show(truncate=False)
# schema = t.StructType(
# [
# t.StructField("artists", t.ArrayType(t.StringType()), True),
# t.StructField("id", t.StringType(), True)
#
# ]
# )
df.show(truncate=False)
The UDF is not complete yet, but with this error I am not able to move further. When I run the above code I get the error below:
<class 'pyspark.sql.column.Column'>
1
Traceback (most recent call last):
File "/home/deepak/PycharmProjects/Spark/src/test.py", line 25, in <module>
df.select("id",format_column(df.artists)).show(truncate=False)
File "/home/deepak/PycharmProjects/Spark/src/test.py", line 18, in format_column
return st.upper()
TypeError: 'Column' object is not callable
The syntax looks fine and I am not able to figure out what is wrong with the code.
You get this error because you are calling the Python function format_column instead of the registered UDF format_str.
You should be using:
from pyspark.sql import functions as F
df.select("id", F.expr("format_str(artists)")).show(truncate=False)
Moreover, the way you registered the UDF, you can only use it in Spark SQL, not with the DataFrame API. If you want to use it within the DataFrame API, you should define the UDF like this:
from pyspark.sql.types import StringType

format_str = F.udf(format_column, StringType())
df.select("id", format_str(df.artists)).show(truncate=False)
Or using decorator syntax:
@F.udf("string")
def format_column(st):
    print(type(st))
    print(1)
    return st.upper()

df.select("id", format_column(df.artists)).show(truncate=False)
That said, you should use Spark built-in functions (upper in this case) unless you have a specific need that can't be done using Spark functions.
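For instance, a minimal sketch of the built-in alternative (no UDF involved):
# upper() applied natively by Spark to the artists column
df.select("id", F.upper("artists")).show(truncate=False)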
Well, I see that you are using a predefined Spark function inside the definition of a UDF, which is acceptable since you said you are starting with some examples. Your error means that there is no upper method on a Column; you can correct it with this definition:
@f.udf("string")
def format_column(st):
    print(type(st))
    print(1)
    return st.upper()

For example, you could then call it the same way as in the first answer:
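# Same call as in the first answer, now resolving to the corrected UDF
df.select("id", format_column(df.artists)).show(truncate=False)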
I am getting duplicates when joining two dataframes where one key is a decimal and the other is a string. It seems that Spark is converting the decimal to a string, which results in a scientific notation expression, but then shows the original result in decimal form just fine. I found a workaround by converting to string directly, but this seems dangerous as duplicates are created without warning.
Is this a bug? How can I detect when this is happening?
Here's a demo in PySpark on Spark 2.4:
>>> from pyspark.sql.functions import *
>>> from pyspark.sql.types import *
>>> df1 = spark.createDataFrame([('a', 9223372034559809871), ('b', 9223372034559809771)], ['group', 'id_int'])
>>> df1=df1.withColumn('id',col('id_int').cast(DecimalType(38,0)))
>>>
>>> df1.show()
+-----+-------------------+-------------------+
|group| id_int| id|
+-----+-------------------+-------------------+
| a|9223372034559809871|9223372034559809871|
| b|9223372034559809771|9223372034559809771|
+-----+-------------------+-------------------+
>>>
>>> df2= spark.createDataFrame([(1, '9223372034559809871'), (2, '9223372034559809771')], ['value', 'id'])
>>> df2.show()
+-----+-------------------+
|value| id|
+-----+-------------------+
| 1|9223372034559809871|
| 2|9223372034559809771|
+-----+-------------------+
>>>
>>> df1.join(df2, ["id"]).show()
+-------------------+-----+-------------------+-----+
| id|group| id_int|value|
+-------------------+-----+-------------------+-----+
|9223372034559809871| a|9223372034559809871| 1|
|9223372034559809871| a|9223372034559809871| 2|
|9223372034559809771| b|9223372034559809771| 1|
|9223372034559809771| b|9223372034559809771| 2|
+-------------------+-----+-------------------+-----+
>>> df1.dtypes
[('group', 'string'), ('id_int', 'bigint'), ('id', 'decimal(38,0)')]
It's happening because of the very large values in the joining keys: when a decimal column is compared with a string column, Spark implicitly casts both sides to double for the comparison, and at this magnitude the two values round to the same double, so every row matches both keys. I tweaked the values in the joining keys and it gives the proper results:
from pyspark.sql.functions import col
from pyspark.sql.types import *
df1 = spark.createDataFrame([('a', 9223372034559809871), ('b', 9123372034559809771)],
['group', 'id_int'])
df1=df1.withColumn('id',col('id_int').cast(DecimalType(38,0)))
df2= spark.createDataFrame([(1, '9223372034559809871'), (2, '9123372034559809771')],
['value', 'id'])
df1.join(df2, df1["id"]==df2["id"],"inner").show()
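An alternative to tweaking the values (not part of the original answer, just a sketch reusing df1 and df2 from the question) is to avoid the lossy implicit cast altogether by casting the string key to the same decimal type before joining:
from pyspark.sql.functions import col
from pyspark.sql.types import DecimalType

# Compare exact decimals instead of doubles: cast the string key explicitly
df2_dec = df2.withColumn('id', col('id').cast(DecimalType(38, 0)))
df1.join(df2_dec, ['id']).show()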
I am trying to groupBy and then calculate the percentile on a PySpark dataframe. I've tested the following piece of code according to this Stack Overflow post:
from pyspark.sql.types import FloatType
import pyspark.sql.functions as func
import numpy as np
qt_udf = func.udf(lambda x,qt: float(np.percentile(x,qt)), FloatType())
df_out = df_in.groupBy('Id').agg(func.collect_list('value').alias('data'))\
.withColumn('median', qt_udf(func.col('data'),func.lit(0.5)).cast("string"))
df_out.show()
But get the following error:
Traceback (most recent call last):
  > df_out.show()
  ...
  > return lambda *a: f(*a)
AttributeError: 'module' object has no attribute 'percentile'
This is because of the numpy version (1.4.1); the percentile function was added in version 1.5. It is not possible to update the numpy version in the short term.
Define a window and use the inbuilt percent_rank function to compute percentile values.
from pyspark.sql import Window
from pyspark.sql import functions as func
w = Window.partitionBy(df_in.Id).orderBy(df_in.value) #assuming default ascending order
df_out = df_in.withColumn('percentile_col',func.percent_rank().over(w))
The question's title suggests that the OP wanted to calculate percentiles, but the body shows that what was really needed was the median per group.
Test dataset:
from pyspark.sql import SparkSession, functions as F, Window as W, Window
spark = SparkSession.builder.getOrCreate()
df_in = spark.createDataFrame(
[('1', 10),
('1', 11),
('1', 12),
('1', 13),
('2', 20)],
['Id', 'value']
)
Percentiles of given data points in groups:
w = W.partitionBy('Id').orderBy('value')
df_in = df_in.withColumn('percentile_of_value_by_Id', F.percent_rank().over(w))
df_in.show()
#+---+-----+-------------------------+
#| Id|value|percentile_of_value_by_Id|
#+---+-----+-------------------------+
#| 1| 10| 0.0|
#| 1| 11| 0.3333333333333333|
#| 1| 12| 0.6666666666666666|
#| 1| 13| 1.0|
#| 2| 20| 0.0|
#+---+-----+-------------------------+
Median (accurate and approximate):
df_out = (df_in.groupBy('Id').agg(
F.expr('percentile(value, .5)').alias('median_accurate'), # for small-mid dfs
F.percentile_approx('value', .5).alias('median_approximate') # for mid-large dfs
))
df_out.show()
#+---+---------------+------------------+
#| Id|median_accurate|median_approximate|
#+---+---------------+------------------+
#| 1| 11.5| 11|
#| 2| 20.0| 20|
#+---+---------------+------------------+
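Note that F.percentile_approx was only added to the Python API in Spark 3.1; if it is not available in your version, the same aggregate can usually be reached through an expression (a sketch under that assumption):
# Fallback for older PySpark versions: call the SQL aggregate via expr
df_out_compat = df_in.groupBy('Id').agg(
    F.expr('percentile_approx(value, 0.5)').alias('median_approximate'))
df_out_compat.show()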
I have a dataframe resulting from a sql query
df1 = sqlContext.sql("select * from table_test")
I need to convert this dataframe to libsvm format so that it can be provided as an input for
pyspark.ml.classification.LogisticRegression
I tried to do the following; however, this resulted in the following error, as I'm using Spark 1.5.2:
df1.write.format("libsvm").save("data/foo")
Failed to load class for data source: libsvm
I wanted to use MLUtils.loadLibSVMFile instead. I'm behind a firewall and can't directly pip install it. So I downloaded the file, scp-ed it and then manually installed it. Everything seemed to work fine but I still get the following error
import org.apache.spark.mllib.util.MLUtils
No module named org.apache.spark.mllib.util.MLUtils
Question 1: Is my above approach to convert the dataframe to libsvm format in the right direction?
Question 2: If "yes" to question 1, how do I get MLUtils working? If "no", what is the best way to convert the dataframe to libsvm format?
I would do it like this (it's just an example with an arbitrary dataframe; I don't know how your df1 is built, the focus is on the data transformations). This is my way to convert a dataframe to libsvm format:
# ... your previous imports
from pyspark.mllib.util import MLUtils
from pyspark.mllib.regression import LabeledPoint
# A DATAFRAME
>>> df.show()
+---+---+---+
| _1| _2| _3|
+---+---+---+
| 1| 3| 6|
| 4| 5| 20|
| 7| 8| 8|
+---+---+---+
# FROM DATAFRAME TO RDD
>>> c = df.rdd # this command will convert your dataframe into an RDD
>>> print (c.take(3))
[Row(_1=1, _2=3, _3=6), Row(_1=4, _2=5, _3=20), Row(_1=7, _2=8, _3=8)]
# FROM RDD OF TUPLE TO A RDD OF LABELEDPOINT
>>> d = c.map(lambda line: LabeledPoint(line[0],[line[1:]])) # arbitrary mapping, it's just an example
>>> print (d.take(3))
[LabeledPoint(1.0, [3.0,6.0]), LabeledPoint(4.0, [5.0,20.0]), LabeledPoint(7.0, [8.0,8.0])]
# SAVE AS LIBSVM
>>> MLUtils.saveAsLibSVMFile(d, "/your/Path/nameFolder/")
What you will see on the "/your/Path/nameFolder/part-0000*" files is:
1.0 1:3.0 2:6.0
4.0 1:5.0 2:20.0
7.0 1:8.0 2:8.0
See the LabeledPoint documentation for details.
I had to do this for it to work
D.map(lambda line: LabeledPoint(line[0],[line[1],line[2]]))
If you want to convert sparse vectors to a 'sparse' libsvm which is more efficient, try this:
from pyspark.ml.linalg import Vectors
from pyspark.mllib.linalg import Vectors as MLLibVectors
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.util import MLUtils
df = spark.createDataFrame([
(0, Vectors.sparse(5, [(1, 1.0), (3, 7.0)])),
(1, Vectors.sparse(5, [(1, 1.0), (3, 7.0)])),
(1, Vectors.sparse(5, [(1, 1.0), (3, 7.0)]))
], ["label", "features"])
df.show()
# +-----+-------------------+
# |label| features|
# +-----+-------------------+
# | 0|(5,[1,3],[1.0,7.0])|
# | 1|(5,[1,3],[1.0,7.0])|
# | 1|(5,[1,3],[1.0,7.0])|
# +-----+-------------------+
MLUtils.saveAsLibSVMFile(df.rdd.map(lambda x: LabeledPoint(x.label, MLLibVectors.fromML(x.features))), './libsvm')
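Regarding Question 2: in PySpark the class is imported from pyspark.mllib.util (as above), not from the Scala package path org.apache.spark.mllib.util. Reading the saved files back would then look roughly like this (path taken from the save call above):
# Load the libsvm files back as an RDD of LabeledPoint
loaded = MLUtils.loadLibSVMFile(spark.sparkContext, './libsvm')
print(loaded.take(3))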
Using this code to find the mode:
import numpy as np
np.random.seed(1)
df2 = sc.parallelize([
(int(x), ) for x in np.random.randint(50, size=10000)
]).toDF(["x"])
cnts = df2.groupBy("x").count()
mode = cnts.join(
cnts.agg(max("count").alias("max_")), col("count") == col("max_")
).limit(1).select("x")
mode.first()[0]
from Calculate the mode of a PySpark DataFrame column?
it returns this error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-53-2a9274e248ac> in <module>()
8 cnts = df.groupBy("x").count()
9 mode = cnts.join(
---> 10 cnts.agg(max("count").alias("max_")), col("count") == col("max_")
11 ).limit(1).select("x")
12 mode.first()[0]
AttributeError: 'str' object has no attribute 'alias'
Instead of this solution I'm attempting this custom one:
df.show()
cnts = df.groupBy("c1").count()
print cnts.rdd.map(tuple).sortBy(lambda a: a[1], ascending=False).first()
cnts = df.groupBy("c2").count()
print cnts.rdd.map(tuple).sortBy(lambda a: a[1] , ascending=False).first()
which returns the most frequent (value, count) pair for each column. So the modes of c1 and c2 are 2.0 and 3.0 respectively.
Can this be applied to all columns c1, c2, c3, c4, c5 in the dataframe instead of explicitly selecting each column as I have done?
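A direct generalization of the per-column snippet above is to loop over the columns (just a sketch; the answer below shows a reshape-based alternative):
# Print the most frequent (value, count) pair for every column
for c in df.columns:
    cnts = df.groupBy(c).count()
    print(c, cnts.rdd.map(tuple).sortBy(lambda a: a[1], ascending=False).first())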
It looks like you're using Python's built-in max, not the Spark SQL function.
import pyspark.sql.functions as F
cnts.agg(F.max("count").alias("max_"))
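Applied to the snippet from the question (with col also taken from pyspark.sql.functions, and df2 as defined above), that becomes:
cnts = df2.groupBy("x").count()
mode = cnts.join(
    cnts.agg(F.max("count").alias("max_")), F.col("count") == F.col("max_")
).limit(1).select("x")
mode.first()[0]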
To find the mode over multiple columns of the same type, you can reshape the data to long format (using melt as defined in Pandas Melt function in Apache Spark; a sketch of that helper follows the output below):
(melt(df, [], df.columns)
# Count by column and value
.groupBy("variable", "value")
.count()
# Find mode per column
.groupBy("variable")
.agg(F.max(F.struct("count", "value")).alias("mode"))
.select("variable", "mode.value"))
+--------+-----+
|variable|value|
+--------+-----+
| c5| 6.0|
| c1| 2.0|
| c4| 5.0|
| c3| 4.0|
| c2| 3.0|
+--------+-----+
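The melt helper used above is not built into Spark; a minimal sketch along the lines of the linked answer (names and defaults assumed):
from pyspark.sql.functions import array, col, explode, lit, struct

def melt(df, id_vars, value_vars, var_name="variable", value_name="value"):
    """Reshape a DataFrame from wide to long, pandas-melt style."""
    # One struct per melted column: (column name, column value)
    vars_and_vals = array(*(
        struct(lit(c).alias(var_name), col(c).alias(value_name))
        for c in value_vars))
    tmp = df.withColumn("_vars_and_vals", explode(vars_and_vals))
    cols = list(id_vars) + [
        col("_vars_and_vals")[x].alias(x) for x in [var_name, value_name]]
    return tmp.select(*cols)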