I have a dataframe like:
data = [('valorant','web', 'start'),
('counter-strike','android', 'start'),
('sims','web', 'finished'),
]
columns = ["game","platform", "type"]
df = spark.createDataFrame(data=data, schema = columns)
df.show()
+--------------+--------+--------+
| game|platform| type|
+--------------+--------+--------+
| valorant| web| start|
|counter-strike| android| start|
| sims| web|finished|
+--------------+--------+--------+
Which I want to turn into:
+--------------+-----+
| game|count|
+--------------+-----+
| valorant| 1|
|counter-strike| 1|
| sims| 1|
| sims| 1|
+--------------+-----+
So that if type == 'finished', the result should have two rows with value 1 for that game instead of just one.
Is there any way I can do this without having to map the DataFrame twice and then merge the resulting RDDs?
If I do:
def func1(x):
    if x.type == "start":
        return (x.game, 1)
    elif x.type == "finished":
        return ((x.game, 1), (x.game, 1))

rdd2 = df.rdd.map(lambda x: func1(x))
df2 = rdd2.toDF(['game', 'value'])
df2.show(truncate=False)
+---------------------------+-----+
|game |value|
+---------------------------+-----+
|valorant |1 |
|counter |1 |
|[Ljava.lang.Object;#4b01785|null |
+---------------------------+-----+
It obviously does not work, since map emits exactly one output element per input row, so the nested tuple returned for 'finished' ends up as a single malformed row. Any ideas?
Use a when expression plus explode over a literal array:
from pyspark.sql import functions as F
df1 = df.withColumn(
    "count",
    F.explode(
        F.when(F.col("type") == "start", F.array(F.lit(1)))
         .when(F.col("type") == "finished", F.array(F.lit(1), F.lit(1)))
    )
).drop("platform", "type")
df1.show()
#+--------------+-----+
#| game|count|
#+--------------+-----+
#| valorant| 1|
#|counter-strike| 1|
#| sims| 1|
#| sims| 1|
#+--------------+-----+
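If you prefer to stay with the RDD API from the original attempt, flatMap (rather than map) lets one input row produce several output rows. A minimal sketch, assuming the same df as in the question:
def expand(row):
    # one output row for 'start', two for 'finished'
    if row.type == "start":
        return [(row.game, 1)]
    elif row.type == "finished":
        return [(row.game, 1), (row.game, 1)]
    return []

df2 = df.rdd.flatMap(expand).toDF(["game", "count"])
df2.show()
That said, the DataFrame-only explode approach above is usually preferable because it avoids the RDD round trip.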
In Scala Spark we can filter rows where column A is not equal to column B of the same dataframe like this:
df.filter(col("A") =!= col("B"))
How can we do the same in PySpark?
I have tried different options like
df.filter(~(df["A"] == df["B"])) and the != operator, but got errors.
Take a look at this snippet:
df = spark.createDataFrame([(1, 2), (1, 1)], "id: int, val: int")
df.show()
+---+---+
| id|val|
+---+---+
| 1| 2|
| 1| 1|
+---+---+
from pyspark.sql.functions import col
df.filter(col("id") != col("val")).show()
+---+---+
| id|val|
+---+---+
| 1| 2|
+---+---+
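The same inequality can also be written with an explicit negation or as a SQL expression string; both forms below should be equivalent to the != filter above (a small sketch reusing the same toy df):
from pyspark.sql.functions import col, expr

# Negate the equality test
df.filter(~(col("id") == col("val"))).show()

# Or use a SQL expression string
df.filter(expr("id != val")).show()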
I have two columns in my DataFrame, name1 and name2.
I want to combine them and count the occurrences of each name (ignoring null and empty values).
df = spark.createDataFrame([
["Luc Krier","Jeanny Thorn"],
["Jeanny Thorn","Ben Weller"],
[ "Teddy E Beecher","Luc Krier"],
["Philippe Schauss","Jeanny Thorn"],
["Meindert I Tholen","Liam Muller"],
["Meindert I Tholen",""]
]).toDF("name1", "name2")
Desired result:
+-----------------+----------+
|name             |Occurrence|
+-----------------+----------+
|Luc Krier        |2         |
|Jeanny Thorn     |3         |
|Teddy E Beecher  |1         |
|Philippe Schauss |1         |
|Meindert I Tholen|2         |
|Liam Muller      |1         |
|Ben Weller       |1         |
+-----------------+----------+
How can I achieve this?
You can use explode with the array function to merge the two columns into one, then simply group by and count, like this:
from pyspark.sql.functions import col, array, explode, count
df.select(explode(array("name1", "name2")).alias("name")) \
.filter("nullif(name, '') is not null") \
.groupBy("name") \
.agg(count("*").alias("Occurrence")) \
.show()
#+-----------------+----------+
#| name|Occurrence|
#+-----------------+----------+
#|Meindert I Tholen| 2|
#| Jeanny Thorn| 3|
#| Luc Krier| 2|
#| Teddy E Beecher| 1|
#|Philippe Schauss| 1|
#| Ben Weller| 1|
#| Liam Muller| 1|
#+-----------------+----------+
Another way is to select each column, union then group by and count:
df.select(col("name1").alias("name")).union(df.select(col("name2").alias("name"))) \
.filter("nullif(name, '') is not null")\
.groupBy("name") \
.agg(count("name").alias("Occurrence")) \
.show()
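As a further alternative (an untested sketch, not part of the original answer), the SQL stack function unpivots the two columns in a single select and then reuses the same filter/group/count steps:
from pyspark.sql.functions import count

df.selectExpr("stack(2, name1, name2) as name") \
    .filter("nullif(name, '') is not null") \
    .groupBy("name") \
    .agg(count("*").alias("Occurrence")) \
    .show()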
Many fancy answers out there, but the easiest solution should be to do a union and then aggregate the count:
df2 = (df.select('name1')
.union(df.select('name2'))
.filter("name1 != ''")
.groupBy('name1')
.count()
.toDF('name', 'Occurrence')
)
df2.show()
+-----------------+----------+
| name|Occurrence|
+-----------------+----------+
|Meindert I Tholen| 2|
| Jeanny Thorn| 3|
| Luc Krier| 2|
| Teddy E Beecher| 1|
|Philippe Schauss| 1|
| Ben Weller| 1|
| Liam Muller| 1|
+-----------------+----------+
There are better ways to do it; one naive way (note that it collects everything to the driver and does not filter out the empty string) is as follows:
from collections import Counter
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("OccurenceCount").getOrCreate()
df = spark.createDataFrame([
["Luc Krier","Jeanny Thorn"],
["Jeanny Thorn","Ben Weller"],
[ "Teddy E Beecher","Luc Krier"],
["Philippe Schauss","Jeanny Thorn"],
["Meindert I Tholen","Liam Muller"],
["Meindert I Tholen",""]
]).toDF("name1", "name2")
counter_dict = dict(Counter(df.select("name1", "name2").rdd.flatMap(lambda x: x).collect()))
counter_list = list(map(list, counter_dict.items()))
frequency_df = spark.createDataFrame(counter_list, ["name", "Occurrence"])
frequency_df.show()
Output:
+-----------------+----------+
| name|Occurrence|
+-----------------+----------+
| | 1|
| Liam Muller| 1|
| Teddy E Beecher| 1|
| Ben Weller| 1|
| Jeanny Thorn| 3|
| Luc Krier| 2|
|Philippe Schauss| 1|
|Meindert I Tholen| 2|
+-----------------+----------+
Does this work?
# Groupby & count both dataframes individually to reduce size.
df_name1 = (df.groupby(['name1']).count()
.withColumnRenamed('name1', 'name')
.withColumnRenamed('count', 'count1'))
df_name2 = (df.groupby(['name2']).count()
.withColumnRenamed('name2', 'name')
.withColumnRenamed('count', 'count2'))
# Join the two dataframes containing frequency counts
# Any null value in the 'count' column can be correctly interpreted as zero.
df_count = (df_name1.join(df_name2, on=['name'], how='outer')
.fillna(0, subset=['count1', 'count2']))
# Sum the two counts and drop the useless columns
df_count = (df_count.withColumn('count', df_count['count1'] + df_count['count2'])
.drop('count1').drop('count2').dropna(subset=['name']))
# (Optional) While any rows with a null name have been removed, rows with an
# empty string ("") for a name are still there. We can drop the empty name
# rows like this.
df_count = df_count[df_count['name'] != '']
df_count.show()
# +-----------------+-----+
# | name|count|
# +-----------------+-----+
# |Meindert I Tholen| 2|
# | Jeanny Thorn| 3|
# | Luc Krier| 2|
# | Teddy E Beecher| 1|
# |Philippe Schauss| 1|
# | Ben Weller| 1|
# | Liam Muller| 1|
# +-----------------+-----+
You can get the required output as follows in Scala:
import org.apache.spark.sql.functions._
val df = Seq(
("Luc Krier","Jeanny Thorn"),
("Jeanny Thorn","Ben Weller"),
( "Teddy E Beecher","Luc Krier"),
("Philippe Schauss","Jeanny Thorn"),
("Meindert I Tholen","Liam Muller"),
("Meindert I Tholen","")
).toDF("name1", "name2")
val df1 = df.filter($"name1".isNotNull).filter($"name1" =!= "")
  .groupBy("name1").agg(count("name1").as("count1"))
val df2 = df.filter($"name2".isNotNull).filter($"name2" =!= "")
  .groupBy("name2").agg(count("name2").as("count2"))
val newdf = df1.join(df2, $"name1" === $"name2", "outer")
  .withColumn("count1", when($"count1".isNull, 0).otherwise($"count1"))
  .withColumn("count2", when($"count2".isNull, 0).otherwise($"count2"))
  .withColumn("Count", $"count1" + $"count2")
val finalDF = newdf
  .withColumn("name", when($"name1".isNull, $"name2").when($"name2".isNull, $"name1").otherwise($"name1"))
  .select("name", "Count")
display(finalDF)
The final output matches the desired result shown above.
I have a pyspark dataframe with the following schema
+-----------+---------+----------+-----------+
| userID|grouping1| grouping2| features|
+-----------+---------+----------+-----------+
|12462563356| 1| A | [5.0,43.0]|
|12462563701| 2| A | [1.0,8.0]|
|12462563701| 1| B | [2.0,12.0]|
|12462564356| 1| C | [1.0,1.0]|
|12462565487| 3| C | [2.0,3.0]|
|12462565698| 2| D | [1.0,1.0]|
|12462565698| 1| E | [1.0,1.0]|
|12462566081| 2| C | [1.0,2.0]|
|12462566081| 1| D | [1.0,15.0]|
|12462566225| 2| E | [1.0,1.0]|
|12462566225| 1| A | [9.0,85.0]|
|12462566526| 2| C | [1.0,1.0]|
|12462566526| 1| D | [3.0,79.0]|
|12462567006| 2| D |[11.0,15.0]|
|12462567006| 1| B |[10.0,15.0]|
|12462567006| 3| A |[10.0,15.0]|
|12462586595| 2| B | [2.0,42.0]|
|12462586595| 3| D | [2.0,16.0]|
|12462589343| 3| E | [1.0,1.0]|
+-----------+---------+----------+-----------+
For the values A, B, C and D in grouping2, I need to apply UDF_A, UDF_B, UDF_C and UDF_D respectively. Is there a way I can write something along the lines of
dataset = dataset.withColumn('outputColName', selectUDF(**params))
where selectUDF is defined as
def selectUDF(**params):
    if row[grouping2] == A:
        return UDF_A(**params)
    elif row[grouping2] == B:
        return UDF_B(**params)
    elif row[grouping2] == C:
        return UDF_C(**params)
    elif row[grouping2] == D:
        return UDF_D(**params)
Using the following example to illustrate what I'm trying to do. (Yes, I thought so too.) I'm using the following toy code to check this:
>>> df = sc.parallelize([[1,2,3], [2,3,4]]).toDF(("a", "b", "c"))
>>> df.show()
+---+---+---+
| a| b| c|
+---+---+---+
| 1| 2| 3|
| 2| 3| 4|
+---+---+---+
>>> def udf1(col):
... return col1*col1
...
>>> def udf2(col):
... return col2*col2*col2
...
>>> def select_udf(col1, col2):
... if col1 == 2:
... return udf1(col2)
... elif col1 == 3:
... return udf2(col2)
... else:
... return 0
...
>>> from pyspark.sql.functions import col
>>> from pyspark.sql.functions import udf
>>> from pyspark.sql.types import IntegerType
>>> select_udf = udf(select_udf, IntegerType())
>>> udf1 = udf(udf1, IntegerType())
>>> udf2 = udf(udf2, IntegerType())
>>> df.withColumn("outCol", select_udf(col("b"), col("c"))).show()
[Stage 9:============================================> (3 + 1) / 4]
This seems to be stuck at this stage forever. Can anyone suggest what might be wrong here?
You don't need a selectUDF; simply use a when expression to apply the desired UDF depending on the value of the grouping2 column:
from pyspark.sql.functions import col, when

df = df.withColumn(
    "outCol",
    when(col("grouping2") == "A", UDF_A(*params))
    .when(col("grouping2") == "B", UDF_B(*params))
    .when(col("grouping2") == "C", UDF_C(*params))
    .when(col("grouping2") == "D", UDF_D(*params))
)
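To make that concrete with the toy example from the question, here is a minimal, self-contained sketch (udf1/udf2 below are illustrative stand-ins for UDF_A/UDF_B, and the condition values are assumptions taken from the toy data):
from pyspark.sql.functions import col, udf, when
from pyspark.sql.types import IntegerType

# the two per-branch UDFs, registered properly
udf1 = udf(lambda x: x * x, IntegerType())
udf2 = udf(lambda x: x * x * x, IntegerType())

df = spark.createDataFrame([(1, 2, 3), (2, 3, 4)], ["a", "b", "c"])

df.withColumn(
    "outCol",
    when(col("b") == 2, udf1(col("c")))
    .when(col("b") == 3, udf2(col("c")))
    .otherwise(0)
).show()
# expected: outCol = 9 for the first row (3*3) and 64 for the second (4*4*4)
Note that Spark does not guarantee that a UDF wrapped in when is evaluated only for rows matching its condition, so each UDF should be able to cope with any input it might receive.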
Given two spark dataframes A and B with the same number of columns and rows, I want to compute the numerical difference between the two dataframes and store it into another dataframe (or another data structure optionally).
For instance let us have the following datasets
DataFrame A:
+----+---+
| A | B |
+----+---+
| 1| 0|
| 1| 0|
+----+---+
DataFrame B:
+----+---+
| A | B |
+----+---+
| 1| 0 |
| 0| 0 |
+----+---+
How to obtain B-A, i.e
+----+---+
| c1 | c2|
+----+---+
| 0| 0 |
| -1| 0 |
+----+---+
In practice the real dataframes have a substantial number of rows and 50+ columns for which the difference needs to be computed. What is the Spark/Scala way of doing it?
I was able to solve this by using the approach below. This code can work with any number of columns. You just have to change the input DFs accordingly.
import org.apache.spark.sql.Row
val df0 = Seq((1, 5), (1, 4)).toDF("a", "b")
val df1 = Seq((1, 0), (3, 2)).toDF("a", "b")
val columns = df0.columns
val rdd = df0.rdd.zip(df1.rdd).map { x =>
  val arr = columns.map(column => x._2.getAs[Int](column) - x._1.getAs[Int](column))
  Row(arr: _*)
}
spark.createDataFrame(rdd, df0.schema).show(false)
Output generated:
df0=>
+---+---+
|a |b |
+---+---+
|1 |5 |
|1 |4 |
+---+---+
df1=>
+---+---+
|a |b |
+---+---+
|1 |0 |
|3 |2 |
+---+---+
Output=>
+---+---+
|a |b |
+---+---+
|0 |-5 |
|2 |-2 |
+---+---+
If your df A has the same schema and row order as df B, you can try the approach below. I don't know if this will work correctly for large datasets; it would be better to already have an id column to join on instead of creating one with monotonically_increasing_id().
import spark.implicits._
import org.apache.spark.sql.functions._
val df0 = Seq((1, 0), (1, 0)).toDF("a", "b")
val df1 = Seq((1, 0), (0, 0)).toDF("a", "b")
// new cols names
val colNamesA = df0.columns.map("A_" + _)
val colNamesB = df0.columns.map("B_" + _)
// rename cols and add id
val dfA = df0.toDF(colNamesA: _*)
.withColumn("id", monotonically_increasing_id())
val dfB = df1.toDF(colNamesB: _*)
.withColumn("id", monotonically_increasing_id())
dfA.show()
dfB.show()
// get columns without id
val dfACols = dfA.columns.dropRight(1).map(dfA(_))
val dfBCols = dfB.columns.dropRight(1).map(dfB(_))
// diff between cols
val calcCols = (dfACols zip dfBCols).map(s=>s._2-s._1)
// join dfs
val joined = dfA.join(dfB, "id")
joined.show()
calcCols.foreach(_.explain(true))
joined.select(calcCols:_*).show()
+---+---+---+
|A_a|A_b| id|
+---+---+---+
| 1| 0| 0|
| 1| 0| 1|
+---+---+---+
+---+---+---+
|B_a|B_b| id|
+---+---+---+
| 1| 0| 0|
| 0| 0| 1|
+---+---+---+
+---+---+---+---+---+
| id|A_a|A_b|B_a|B_b|
+---+---+---+---+---+
| 0| 1| 0| 1| 0|
| 1| 1| 0| 0| 0|
+---+---+---+---+---+
(B_a#26 - A_a#18)
(B_b#27 - A_b#19)
+-----------+-----------+
|(B_a - A_a)|(B_b - A_b)|
+-----------+-----------+
| 0| 0|
| -1| 0|
+-----------+-----------+
I want to generate a when clause based on values in a dict. It's very similar to what's being done in "How do I use multiple conditions with pyspark.sql.functions.when()?", only I want to pass a dict of columns and values.
Let's say I have a dict:
{
'employed': 'Y',
'athlete': 'N'
}
I want to use that dict to generate the equivalent of:
df.withColumn("call_person", when((col("employed") == "Y") & (col("athlete") == "N"), "Y"))
So the end result is:
+---+-----------+--------+-------+
| id|call_person|employed|athlete|
+---+-----------+--------+-------+
| 1| Y | Y | N |
| 2| N | Y | Y |
| 3| N | N | N |
+---+-----------+--------+-------+
Note: part of the reason I want to do it programmatically is that I have dicts of different lengths (i.e. different numbers of conditions).
Use reduce() function:
from functools import reduce
from pyspark.sql.functions import when, col
# dictionary
d = {
'employed': 'Y',
'athlete': 'N'
}
# set up the conditions, multiple conditions merged with `&`
cond = reduce(lambda x,y: x&y, [ col(c) == v for c,v in d.items() if c in df.columns ])
# set up the new column
df.withColumn("call_person", when(cond, "Y").otherwise("N")).show()
+---+--------+-------+-----------+
| id|employed|athlete|call_person|
+---+--------+-------+-----------+
| 1| Y| N| Y|
| 2| Y| Y| N|
| 3| N| N| N|
+---+--------+-------+-----------+
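The df used above isn't shown in the answer; a minimal version matching the question's expected output can be built like this (the id/employed/athlete values are taken from that table, so treat the snippet as illustrative):
# hypothetical sample data matching the question's desired result
df = spark.createDataFrame(
    [(1, "Y", "N"), (2, "Y", "Y"), (3, "N", "N")],
    ["id", "employed", "athlete"]
)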
You can also access dictionary items directly:
lookup = {
    'code': 'b',
    'amt': '4'
}
data = [(1, 'code'), (1, 'amt')]
df = spark.createDataFrame(data, ['id', 'dict_key'])
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
user_func = udf(lambda x: lookup.get(x), StringType())
newdf = df.withColumn('new_column', user_func(df.dict_key))
>>> newdf.show();
+---+--------+----------+
| id|dict_key|new_column|
+---+--------+----------+
| 1| code| b|
| 1| amt| 4|
+---+--------+----------+
Or, broadcasting the dictionary:
broadcast_dict = sc.broadcast(lookup)
def my_func(key):
    return broadcast_dict.value.get(key)

new_my_func = udf(my_func, StringType())
newdf = df.withColumn('new_column', new_my_func(df.dict_key))
>>> newdf.show();
+---+--------+----------+
| id|dict_key|new_column|
+---+--------+----------+
| 1| code| b|
| 1| amt| 4|
+---+--------+----------+
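A UDF isn't strictly required for this kind of lookup; building a literal map column from the dictionary works as well. A minimal sketch, self-contained except for the existing df with its dict_key column:
from itertools import chain
from pyspark.sql.functions import create_map, lit, col

lookup = {'code': 'b', 'amt': '4'}

# turn the Python dict into a MAP<string,string> literal expression
mapping = create_map([lit(x) for x in chain(*lookup.items())])

newdf = df.withColumn('new_column', mapping[col('dict_key')])
newdf.show()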