Pyspark: add one row dynamically into the final dataframe - azure

I have a final dataframe with this format:
Product_ID: string
Product_COD: string
Product_NAM: string
Product_VER: integer
ProductLine_NAM: string
Language_COD: string
ProductType_NAM: string
Load_DAT: integer
LoadEnd_DAT:integer
edmChange_DTT: timestamp
and I want to add a new row to that dataframe where the ID (Product_ID) is -1, the string columns are set to 'Unknown', and the remaining datatypes are set to null.
To do that, I created this code:
from functools import reduce
from pyspark.sql import DataFrame
from pyspark.sql.functions import lit

id_column = "Product_ID"

# create a new one-row dataframe with -1 in the id column
df_lessOne = spark.createDataFrame(["-1"], "string").toDF(id_column)

# add the remaining columns of the dataframe as nulls
appended_df = finalDf.unionByName(df_lessOne, allowMissingColumns=True)

appended_df_filter = appended_df.filter(id_column + " = '-1'")

# select only the string columns
columns = [item[0] for item in appended_df_filter.dtypes if item[1].startswith('string')]

# replace the string columns with "Unknown"
for c_na in columns:
    appended_df_filter = (appended_df_filter
                          .filter(id_column + " = '-1'")
                          .withColumn(c_na, lit('Unknown')))

appended_df = appended_df.filter(id_column + " <> '-1'")

dfs = [appended_df, appended_df_filter]

# add the final -1 row back to the final dataframe
finalDf = reduce(DataFrame.unionAll, dfs)
display(finalDf)
but unfortunately it's not working well.
I'm trying to build this dynamically because I want to reuse it on other dataframes; I only need to change id_column afterwards.
Can anyone please help me achieve this?
Thank you!

from pyspark.sql.types import *
from datetime import datetime
import pyspark.sql.functions as F
data2 = [
    ("xp3980", "2103", "Product_1", 1, "PdLine_23", "XX1", "PNT_1", 2, 36636, datetime.strptime('2020-08-20 10:00:00', '%Y-%m-%d %H:%M:%S')),
    ("gi9387", "2411", "Product_2", 1, "PdLine_44", "YT89", "PNT_6", 2, 35847, datetime.strptime('2021-07-21 7:00:00', '%Y-%m-%d %H:%M:%S'))
]

schema = StructType([
    StructField("Product_ID", StringType(), True),
    StructField("Product_COD", StringType(), True),
    StructField("Product_NAM", StringType(), True),
    StructField("Product_VER", IntegerType(), True),
    StructField("ProductLine_NAM", StringType(), True),
    StructField("Language_COD", StringType(), True),
    StructField("ProductType_NAM", StringType(), True),
    StructField("Load_DAT", IntegerType(), True),
    StructField("LoadEnd_DAT", IntegerType(), True),
    StructField("edmChange_DTT", TimestampType(), True)
])

my_df = spark.createDataFrame(data=data2, schema=schema)
df_res = spark.createDataFrame([(-1,)]).toDF("Product_ID")

# fill the remaining columns: 'Unknown' for strings, null otherwise
for c in my_df.schema:
    if c.name == 'Product_ID':
        continue
    if isinstance(c.dataType, StringType):
        df_res = df_res.withColumn(c.name, F.lit('Unknown'))
    else:
        df_res = df_res.withColumn(c.name, F.lit(None))

my_df.union(df_res).show()
# +----------+-----------+-----------+-----------+---------------+------------+---------------+--------+-----------+-------------------+
# |Product_ID|Product_COD|Product_NAM|Product_VER|ProductLine_NAM|Language_COD|ProductType_NAM|Load_DAT|LoadEnd_DAT| edmChange_DTT|
# +----------+-----------+-----------+-----------+---------------+------------+---------------+--------+-----------+-------------------+
# | xp3980| 2103| Product_1| 1| PdLine_23| XX1| PNT_1| 2| 36636|2020-08-20 10:00:00|
# | gi9387| 2411| Product_2| 1| PdLine_44| YT89| PNT_6| 2| 35847|2021-07-21 07:00:00|
# | -1| Unknown| Unknown| null| Unknown| Unknown| Unknown| null| null| null|
# +----------+-----------+-----------+-----------+---------------+------------+---------------+--------+-----------+-------------------+
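Since the goal is to reuse this on other dataframes by changing only the id column, the same idea can be wrapped in a small helper. A minimal sketch (the helper name add_unknown_row and the "-1" string id are illustrative assumptions, and the id column is assumed to be a string):

import pyspark.sql.functions as F
from pyspark.sql import DataFrame
from pyspark.sql.types import StringType

def add_unknown_row(df: DataFrame, id_column: str) -> DataFrame:
    # one-row dataframe holding only the "unknown member" id
    unknown = spark.createDataFrame([("-1",)]).toDF(id_column)
    # fill the remaining columns from the source schema:
    # 'Unknown' for strings, a typed null for everything else
    for field in df.schema:
        if field.name == id_column:
            continue
        if isinstance(field.dataType, StringType):
            unknown = unknown.withColumn(field.name, F.lit('Unknown'))
        else:
            unknown = unknown.withColumn(field.name, F.lit(None).cast(field.dataType))
    # align column order before the union
    return df.unionByName(unknown.select(df.columns))

finalDf = add_unknown_row(finalDf, "Product_ID")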

Related

I want to replicate SQL OUTER APPLY functionality in PySpark

I want to replicate what the "OUTER APPLY" function does in SQL in PySpark.
Here are my example data frames:
## Department table
data = [
    (1, 'Engineering'),
    (2, 'Administration'),
    (3, 'Sales'),
    (4, 'Marketing'),
    (5, 'Finance')
]
schema = StructType([
    StructField('DepartmentID', IntegerType(), True),
    StructField('Name', StringType(), True)
])
Department = spark.createDataFrame(data=data, schema=schema)
Department.show()
+------------+--------------+
|DepartmentID| Name|
+------------+--------------+
| 1| Engineering|
| 2|Administration|
| 3| Sales|
| 4| Marketing|
| 5| Finance|
+------------+--------------+
## Employee table
data = [
    (1, 'Orlando', 'Gee', 1),
    (2, 'Keith', 'Harris', 2),
    (3, 'Donna', 'Carreras', 3),
    (4, 'Janet', 'Gates', 3),
]
schema = StructType([
    StructField('EmployeeID', IntegerType(), True),
    StructField('FirstName', StringType(), True),
    StructField('LastName', StringType(), True),
    StructField('DepartmentID', IntegerType(), True),
])
Employee = spark.createDataFrame(data=data, schema=schema)
Employee.show()
+----------+---------+--------+------------+
|EmployeeID|FirstName|LastName|DepartmentID|
+----------+---------+--------+------------+
| 1| Orlando| Gee| 1|
| 2| Keith| Harris| 2|
| 3| Donna|Carreras| 3|
| 4| Janet| Gates| 3|
+----------+---------+--------+------------+
I tried creating temp views and querying them with Spark SQL as we normally do on temporary tables... but I keep getting
`[PARSE_SYNTAX_ERROR] Syntax error at or near 'OUTER'(line 3, pos 2)
== SQL ==
SELECT * FROM Department D
OUTER APPLY
--^^^
(
SELECT * FROM Employee E
WHERE E.DepartmentID = D.DepartmentID
) A
`
error. Any help is appreciated.
Employee.createOrReplaceTempView("Employee")
Department.createOrReplaceTempView("Department")
sql_query = """
SELECT * FROM Department D
OUTER APPLY
(
SELECT * FROM Employee E
WHERE E.DepartmentID = D.DepartmentID
) A
"""
result_df = sqlContext.sql(sql_query)
OUTER APPLY is not part of the Spark SQL syntax.
However, when the applied subquery is just a correlated filter (as in your example), OUTER APPLY produces the same results as a LEFT OUTER JOIN, which Spark SQL does support.
Using a LEFT OUTER JOIN, your example would look like this in Spark SQL syntax:
sql_query = """
SELECT * FROM Department D
LEFT OUTER JOIN Employee E ON E.DepartmentID = D.DepartmentID
"""
Using a LEFT OUTER JOIN, your example would look like this in PySpark syntax:
Department.join(Employee, Employee.DepartmentID == Department.DepartmentID, "left_outer") \
.show(truncate=False)
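For completeness, the rewritten SQL can also be run through spark.sql against the temp views the question already registers; the explicit column list below is only there to avoid the duplicated DepartmentID column:

result_df = spark.sql("""
    SELECT D.DepartmentID, D.Name, E.EmployeeID, E.FirstName, E.LastName
    FROM Department D
    LEFT OUTER JOIN Employee E ON E.DepartmentID = D.DepartmentID
""")
result_df.show(truncate=False)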

PySpark row to struct with specified structure

This is my initial dataframe:
columns = ["CounterpartID","Year","Month","Day","churnprobability", "deadprobability"]
data = [(1234, 2021,5,12, 0.85,0.6),(1224, 2022,6,12, 0.75,0.6),(1345, 2022,5,13, 0.8,0.2),(234, 2021,7,12, 0.9,0.8)]
from pyspark.sql.types import StructType, StructField, IntegerType, DoubleType
schema = StructType([
    StructField("client_id", IntegerType(), False),
    StructField("year", IntegerType(), False),
    StructField("month", IntegerType(), False),
    StructField("day", IntegerType(), False),
    StructField("churn_probability", DoubleType(), False),
    StructField("dead_probability", DoubleType(), False)
])
df = spark.createDataFrame(data=data, schema=schema)
df.printSchema()
df.show(truncate=False)
Then I do some transformations on the columns (basically, splitting each float column into an integer-part column and a fractional-part column) to get the intermediary dataframe.
abc = df.rdd.map(lambda x: (
    x[0], x[1], x[2], x[3],
    int(x[4]), int(x[4] % 1 * pow(10, 9)),
    int(x[5]), int(x[5] % 1 * pow(10, 9))
)).toDF(['client_id', 'year', 'month', 'day',
         'churn_probability_unit', 'churn_probability_nano',
         'dead_probability_unit', 'dead_probability_nano'])
display(abc)
Below is the final desired dataframe (this is just an example of one row, but of course I'll need all the rows from the intermediary dataframe).
sjson = {"clientId": {"id": 1234 },"eventDate": {"year": 2022,"month": 8,"day": 5},"churnProbability": {"rate": {"units": "500","nanos": 780000000}},"deadProbability": {"rate": {"units": "500","nanos": 780000000}}}
df = spark.read.json(sc.parallelize([sjson])).select("clientId", "eventDate", "churnProbability", "deadProbability")
display(df)
How do I reach this end state from the intermediary state efficiently for all rows?
End goal is to use this final dataframe to write to Kafka where the schema of the topic is a form of the final desired dataframe.
I would eliminate the RDD logic (and the extra toDF) by using a single select on your original df:
from pyspark.sql import functions as F
defg = df.select(
    F.struct(F.col('client_id').alias('id')).alias('clientId'),
    F.struct('year', 'month', 'day').alias('eventDate'),
    F.struct(
        F.struct(
            F.floor('churn_probability').alias('unit'),
            (F.col('churn_probability') % 1 * 10**9).cast('long').alias('nanos')
        ).alias('rate')
    ).alias('churnProbability'),
    F.struct(
        F.struct(
            F.floor('dead_probability').alias('unit'),
            (F.col('dead_probability') % 1 * 10**9).cast('long').alias('nanos')
        ).alias('rate')
    ).alias('deadProbability'),
)
defg.show()
# +--------+-------------+----------------+----------------+
# |clientId| eventDate|churnProbability| deadProbability|
# +--------+-------------+----------------+----------------+
# | {1234}|{2021, 5, 12}|{{0, 850000000}}|{{0, 600000000}}|
# | {1224}|{2022, 6, 12}|{{0, 750000000}}|{{0, 600000000}}|
# | {1345}|{2022, 5, 13}|{{0, 800000000}}|{{0, 200000000}}|
# | {234}|{2021, 7, 12}|{{0, 900000000}}|{{0, 800000000}}|
# +--------+-------------+----------------+----------------+
So, I was able to solve this using structs, without using to_json:
import pyspark.sql.functions as f
defg = abc.withColumn(
    "clientId",
    f.struct(
        f.col("client_id").alias("id")
    )
).withColumn(
    "eventDate",
    f.struct(
        f.col("year").alias("year"),
        f.col("month").alias("month"),
        f.col("day").alias("day"),
    )
).withColumn(
    "churnProbability",
    f.struct(
        f.struct(
            f.col("churn_probability_unit").alias("unit"),
            f.col("churn_probability_nano").alias("nanos")
        ).alias("rate")
    )
).withColumn(
    "deadProbability",
    f.struct(
        f.struct(
            f.col("dead_probability_unit").alias("unit"),
            f.col("dead_probability_nano").alias("nanos")
        ).alias("rate")
    )
).select("clientId", "eventDate", "churnProbability", "deadProbability")
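Since the stated end goal is writing this to Kafka, a minimal sketch of serializing the nested columns into the Kafka value field (assumes the Spark-Kafka connector is on the classpath; the broker address and topic name here are placeholders):

from pyspark.sql import functions as F

(defg
 .select(F.to_json(F.struct("clientId", "eventDate", "churnProbability", "deadProbability")).alias("value"))
 .write
 .format("kafka")
 .option("kafka.bootstrap.servers", "host1:9092")
 .option("topic", "product-events")
 .save())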

How to find out the total amount for each month using Spark in Python

I'm looking for a way to aggregate my data by month. I first want to keep only the month of my visitdate. My DataFrame looks like this:
Row(visitdate = 1/1/2013,
    patientid = P1_Pt1959,
    amount = 200,
    note = jnut,
)
My objective is then to group by visitdate and calculate the sum of amount. I tried this:
from pyspark.sql import SparkSession
spark = SparkSession \
    .builder \
    .appName("Python Spark SQL basic example") \
    .config("spark.some.config.option", "some-value") \
    .getOrCreate()
file_path = "G:/Visit Data.csv"
patients = spark.read.csv(file_path,header = True)
patients.createOrReplaceTempView("visitdate")
sqlDF = spark.sql("SELECT visitdate,SUM(amount) as totalamount from visitdate GROUP BY visitdate")
sqlDF.show()
This is the result:
+----------+-----------+
| visitdate|totalamount|
+----------+-----------+
|  9/1/2013|    10800.0|
|25/04/2013|    12440.0|
|27/03/2014|    16930.0|
|26/03/2015|    18560.0|
|14/05/2013|    13770.0|
|30/06/2013|    13880.0|
+----------+-----------+
My objective is to get something like this:
+----------+-----------+
| visitdate|totalamount|
+----------+-----------+
|  1/1/2013|    10800.0|
|  1/2/2013|    12440.0|
|  1/3/2013|    16930.0|
|  1/4/2014|    18560.0|
|  1/5/2015|    13770.0|
|  1/6/2015|    13880.0|
+----------+-----------+
You need to truncate your dates down to months so they group properly, and then do a groupBy/sum. There is a Spark function to do this for you, called date_trunc. For example:
from datetime import date
from pyspark.sql.functions import date_trunc, sum

data = [
    (date(2000, 1, 2), 1000),
    (date(2000, 1, 2), 2000),
    (date(2000, 2, 3), 3000),
    (date(2000, 2, 4), 4000),
]
df = spark.createDataFrame(sc.parallelize(data), ["date", "amount"])
df.groupBy(date_trunc("month", df.date)).agg(sum("amount")).show()
+-----------------------+-----------+
|date_trunc(month, date)|sum(amount)|
+-----------------------+-----------+
| 2000-01-01 00:00:00| 3000|
| 2000-02-01 00:00:00| 7000|
+-----------------------+-----------+
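Applied back to the question's data, where visitdate comes out of the CSV as a string, a minimal sketch (assuming the dates are day/month/year and that amount should be summed as a number):

from pyspark.sql.functions import col, date_trunc, sum as sum_, to_date

monthly = (
    patients
    .withColumn("visit_month", date_trunc("month", to_date(col("visitdate"), "d/M/yyyy")))
    .groupBy("visit_month")
    .agg(sum_(col("amount").cast("double")).alias("totalamount"))
)
monthly.show()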

Set schema in pyspark dataframe read.csv with null elements

I have a data set (example) that when imported with
df = spark.read.csv(filename, header=True, inferSchema=True)
df.show()
will assign the column containing 'NA' a StringType(), where I would like it to be IntegerType() (or ByteType()).
I then tried to set
schema = StructType([
    StructField("col_01", IntegerType()),
    StructField("col_02", DateType()),
    StructField("col_03", IntegerType())
])
df = spark.read.csv(filename, header=True, schema=schema)
df.show()
The output of df.show() then displays the entire row as null for the rows where 'col_03' is 'NA'.
However col_01 and col_02 return appropriate data if they are called with
df.select(['col_01','col_02']).show()
I can work around this by casting the data type of col_03 after the import:
df = spark.read.csv(filename, header=True, inferSchema=True)
df = df.withColumn('col_03', df['col_03'].cast(IntegerType()))
df.show()
but I think this is not ideal, and it would be much better if I could assign the data type of each column directly when setting the schema.
Would anyone be able to guide me on what I'm doing incorrectly? Or is casting the data types after importing the only solution? Any comment regarding the performance of the two approaches (if we can make assigning the schema work) is also welcome.
Thank you,
You can set a new null value in spark's csv loader using nullValue:
for a csv file looking like this:
col_01,col_02,col_03
111,2007-11-18,3
112,2002-12-03,4
113,2007-02-14,5
114,2003-04-16,NA
115,2011-08-24,2
116,2003-05-03,3
117,2001-06-11,4
118,2004-05-06,NA
119,2012-03-25,5
120,2006-10-13,4
and forcing the schema:
from pyspark.sql.types import StructType, StructField, IntegerType, DateType

schema = StructType([
    StructField("col_01", IntegerType()),
    StructField("col_02", DateType()),
    StructField("col_03", IntegerType())
])
You'll get:
df = spark.read.csv(filename, header=True, nullValue='NA', schema=schema)
df.show()
df.printSchema()
+------+----------+------+
|col_01| col_02|col_03|
+------+----------+------+
| 111|2007-11-18| 3|
| 112|2002-12-03| 4|
| 113|2007-02-14| 5|
| 114|2003-04-16| null|
| 115|2011-08-24| 2|
| 116|2003-05-03| 3|
| 117|2001-06-11| 4|
| 118|2004-05-06| null|
| 119|2012-03-25| 5|
| 120|2006-10-13| 4|
+------+----------+------+
root
|-- col_01: integer (nullable = true)
|-- col_02: date (nullable = true)
|-- col_03: integer (nullable = true)
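The same read can also be written with the option-style reader API, which some find clearer when several CSV options accumulate; a sketch using the filename and schema from above:

df = (spark.read
      .option("header", "true")
      .option("nullValue", "NA")
      .schema(schema)
      .csv(filename))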
Try this once (but it will read every column as string type; you can typecast as per your requirement):
import csv
from pyspark.sql.types import IntegerType

data = []
with open('filename', 'r') as doc:
    reader = csv.DictReader(doc)
    for line in reader:
        data.append(line)

df = sc.parallelize(data).toDF()
df = df.withColumn("col_03", df["col_03"].cast(IntegerType()))

Spark: return null from failed regexp_extract()

Suppose you try to extract a substring from a column of a dataframe. regexp_extract() returns null if the field itself is null, but returns an empty string if the field is not null and the expression is not found. How can you return a null value for the latter case?
df = spark.createDataFrame([(None),('foo'),('foo_bar')], StringType())
df.select(regexp_extract('value', r'_(.+)', 1).alias('extracted')).show()
# +---------+
# |extracted|
# +---------+
# | null|
# | |
# | bar|
# +---------+
I'm not sure if regexp_extract() could ever return None for a String type. One thing you could do is replace empty strings with None using a user defined function:
from pyspark.sql.functions import regexp_extract, udf
from pyspark.sql.types import StringType
df = spark.createDataFrame([(None),('foo'),('foo_bar')], StringType())
toNoneUDF = udf(lambda val: None if val == "" else val, StringType())
new_df = df.select(regexp_extract('value', r'_(.+)', 1).alias('extracted'))
new_df.withColumn("extracted", toNoneUDF(new_df.extracted)).show()
This should work:
from pyspark.sql.functions import col, lit, regexp_extract, when
from pyspark.sql.types import StringType

df = spark.createDataFrame([(None), ('foo'), ('foo_bar')], StringType())
df = df.select(regexp_extract('value', r'_(.+)', 1).alias('extracted'))
df.withColumn(
    'extracted',
    when(col('extracted') != '', col('extracted')).otherwise(lit(None))
).show()
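For completeness, the same empty-string-to-null replacement can also be done without when/otherwise, using the built-in nullif SQL function on the extracted column; a minimal sketch:

from pyspark.sql.functions import expr

df.select(expr("nullif(extracted, '')").alias('extracted')).show()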
In Spark SQL, I've found a solution to count the number of regex occurrences, ignoring null values:
SELECT COUNT(CASE WHEN rlike(col, "_(.+)") THEN 1 END)
FROM VALUES (NULL), ("foo"), ("foo_bar"), ("") AS tab(col);
Result:
1
I hope this will help some of you.
