I have a dictionary (variable pats) with many arguments for when: conditions and the values to return.
from pyspark.sql import functions as F
df = spark.createDataFrame([("ė",), ("2",), ("",), ("#",)], ["col1"])
pats = {
r"^\d$" :"digit",
r"^\p{L}$" :"letter",
r"^[\p{P}\p{S}]$":"spec_char",
r"^$" :"empty"
}
whens = (
    F.when(F.col("col1").rlike(list(pats.keys())[0]), pats[list(pats.keys())[0]])
    .when(F.col("col1").rlike(list(pats.keys())[1]), pats[list(pats.keys())[1]])
    .when(F.col("col1").rlike(list(pats.keys())[2]), pats[list(pats.keys())[2]])
    .when(F.col("col1").rlike(list(pats.keys())[3]), pats[list(pats.keys())[3]])
    .otherwise(F.col("col1"))
)
df = df.withColumn("col2", whens)
df.show()
# +----+---------+
# |col1| col2|
# +----+---------+
# | ė| letter|
# | 2| digit|
# | | empty|
# | #|spec_char|
# +----+---------+
I'm looking for a scalable way to chain all the when conditions, so that I don't have to write a separate line for every key.
Without reduce
Start the chain from the functions module itself: F.when is a module-level function, so the first iteration calls F.when(...) and every later iteration calls .when(...) on the accumulated Column.
whens = F
for k, v in pats.items():
    whens = whens.when(F.col("col1").rlike(k), v)
whens = whens.otherwise(F.col("col1"))
Full code:
from pyspark.sql import functions as F
df = spark.createDataFrame([("ė",), ("2",), ("",), ("#",)], ["col1"])
pats = {
r"^\d$" :"digit",
r"^\p{L}$" :"letter",
r"^[\p{P}\p{S}]$":"spec_char",
r"^$" :"empty"
}
whens = F
for k, v in pats.items():
    whens = whens.when(F.col("col1").rlike(k), v)
whens = whens.otherwise(F.col("col1"))
df = df.withColumn("col2", whens)
df.show()
# +----+---------+
# |col1| col2|
# +----+---------+
# | ė| letter|
# | 2| digit|
# | | empty|
# | #|spec_char|
# +----+---------+
Using reduce
The same chain can be built with functools.reduce, using the functions module F as the initial accumulator:
from functools import reduce
whens = reduce(
    lambda acc, p: acc.when(F.col("col1").rlike(p), pats[p]),
    pats.keys(),
    F
).otherwise(F.col("col1"))
Full code:
from pyspark.sql import functions as F
from functools import reduce
df = spark.createDataFrame([("ė",), ("2",), ("",), ("#",)], ["col1"])
pats = {
r"^\d$" :"digit",
r"^\p{L}$" :"letter",
r"^[\p{P}\p{S}]$":"spec_char",
r"^$" :"empty"
}
whens = reduce(
    lambda acc, p: acc.when(F.col("col1").rlike(p), pats[p]),
    pats.keys(),
    F
).otherwise(F.col("col1"))
df = df.withColumn("col2", whens)
df.show()
# +----+---------+
# |col1| col2|
# +----+---------+
# | ė| letter|
# | 2| digit|
# | | empty|
# | #|spec_char|
# +----+---------+
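If the pattern comes up in several places, the loop can be wrapped in a small reusable helper. This is just a sketch; chained_when is a hypothetical name, not part of the answer above.
def chained_when(col_name, mapping, default=None):
    # Build a chained when/otherwise expression from a {pattern: label} dict.
    expr = F
    for pattern, label in mapping.items():
        expr = expr.when(F.col(col_name).rlike(pattern), label)
    # Fall back to the original column unless an explicit default is given
    return expr.otherwise(F.col(col_name) if default is None else default)

df = df.withColumn("col2", chained_when("col1", pats))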
Related
I have two MS Access SQL queries which I want to convert into PySpark. The queries look like this (we have two tables Employee and Department):
UPDATE EMPLOYEE INNER JOIN [DEPARTMENT] ON
EMPLOYEE.STATEPROVINCE = [DEPARTMENT].[STATE_LEVEL]
SET EMPLOYEE.STATEPROVINCE = [DEPARTMENT]![STATE_ABBREVIATION];
UPDATE EMPLOYEE INNER JOIN [DEPARTMENT] ON
EMPLOYEE.STATEPROVINCE = [DEPARTMENT].[STATE_LEVEL]
SET EMPLOYEE.MARKET = [DEPARTMENT]![MARKET];
Test dataframes:
from pyspark.sql import functions as F
df_emp = spark.createDataFrame([(1, 'a'), (2, 'bb')], ['EMPLOYEE', 'STATEPROVINCE'])
df_emp.show()
# +--------+-------------+
# |EMPLOYEE|STATEPROVINCE|
# +--------+-------------+
# | 1| a|
# | 2| bb|
# +--------+-------------+
df_dept = spark.createDataFrame([('bb', 'b')], ['STATE_LEVEL', 'STATE_ABBREVIATION'])
df_dept.show()
# +-----------+------------------+
# |STATE_LEVEL|STATE_ABBREVIATION|
# +-----------+------------------+
# | bb| b|
# +-----------+------------------+
Running your SQL query in Microsoft Access replaces STATEPROVINCE with the matching STATE_ABBREVIATION (with the test data, 'bb' becomes 'b', while 'a' stays unchanged because there is no match). In PySpark, you can get the same result like this:
df = (df_emp.alias('a')
    .join(df_dept.alias('b'), df_emp.STATEPROVINCE == df_dept.STATE_LEVEL, 'left')
    .select(
        *[c for c in df_emp.columns if c != 'STATEPROVINCE'],
        F.coalesce('b.STATE_ABBREVIATION', 'a.STATEPROVINCE').alias('STATEPROVINCE')
    )
)
df.show()
# +--------+-------------+
# |EMPLOYEE|STATEPROVINCE|
# +--------+-------------+
# | 1| a|
# | 2| b|
# +--------+-------------+
First you do a left join, then a select. The select has two parts: first, you select everything from df_emp except "STATEPROVINCE"; then, for the new "STATEPROVINCE", you take "STATE_ABBREVIATION" from df_dept, falling back to "STATEPROVINCE" from df_emp when it is null (i.e. there is no match in df_dept).
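For reference, here is the same logic expressed through Spark SQL. This is a sketch that assumes the dataframes are registered as the temporary views emp and dept, and that EMPLOYEE is the only other column, as in the test dataframes:
df_emp.createOrReplaceTempView("emp")
df_dept.createOrReplaceTempView("dept")
df = spark.sql("""
    SELECT e.EMPLOYEE,
           COALESCE(d.STATE_ABBREVIATION, e.STATEPROVINCE) AS STATEPROVINCE
    FROM emp e
    LEFT JOIN dept d ON e.STATEPROVINCE = d.STATE_LEVEL
""")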
For your second query, you only need to change the columns in the select statement:
df = (df_emp.alias('a')
    .join(df_dept.alias('b'), df_emp.STATEPROVINCE == df_dept.STATE_LEVEL, 'left')
    .select(
        *[c for c in df_emp.columns if c != 'MARKET'],
        F.coalesce('b.MARKET', 'a.MARKET').alias('MARKET')
    )
)
I'm trying to find an equivalent for the following snippet (reference) that creates a unique id for every unique combination of two columns in PySpark.
Pandas approach:
df['my_id'] = df.groupby(['foo', 'bar'], sort=False).ngroup() + 1
I tried the following, but it's creating more ids than required:
df = df.withColumn("my_id", F.row_number().over(Window.orderBy('foo', 'bar')))
Instead of row_number, use dense_rank:
from pyspark.sql import functions as F, Window
df = spark.createDataFrame(
    [('r1', 'ph1'),
     ('r1', 'ph1'),
     ('r1', 'ph2'),
     ('s4', 'ph3'),
     ('s3', 'ph2'),
     ('s3', 'ph2')],
    ['foo', 'bar'])
df = df.withColumn("my_id", F.dense_rank().over(Window.orderBy('foo', 'bar')))
df.show()
# +---+---+-----+
# |foo|bar|my_id|
# +---+---+-----+
# | r1|ph1| 1|
# | r1|ph1| 1|
# | r1|ph2| 2|
# | s3|ph2| 3|
# | s3|ph2| 3|
# | s4|ph3| 4|
# +---+---+-----+
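A note on the design choice: an unpartitioned Window pulls all rows into a single partition. If that becomes a bottleneck, one variant (a sketch, not part of the original answer) is to rank only the distinct pairs and join the ids back:
ids = (df.select('foo', 'bar').distinct()
    .withColumn('my_id', F.dense_rank().over(Window.orderBy('foo', 'bar'))))
df = df.join(ids, on=['foo', 'bar'], how='left')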
I have the following dataset
columns = ['id','trandatetime','code','zip']
data = [('1', '2020-02-06T17:33:21.000+0000', '0', '35763'),
        ('1', '2020-02-06T17:39:55.000+0000', '0', '35763'),
        ('1', '2020-02-07T06:06:42.000+0000', '0', '35741'),
        ('1', '2020-02-07T06:28:17.000+0000', '4', '94043'),
        ('1', '2020-02-07T07:12:13.000+0000', '0', '35802'),
        ('1', '2020-02-07T08:23:29.000+0000', '0', '30738')]
df = spark.createDataFrame(data).toDF(*columns)
df = df.withColumn("trandatetime", to_timestamp("trandatetime"))
+---+--------------------+----+-----+
| id| trandatetime|code| zip|
+---+--------------------+----+-----+
| 1|2020-02-06T17:33:...| 0|35763|
| 1|2020-02-06T17:39:...| 0|35763|
| 1|2020-02-07T06:06:...| 0|35741|
| 1|2020-02-07T06:28:...| 4|94043|
| 1|2020-02-07T07:12:...| 0|35802|
| 1|2020-02-07T08:23:...| 0|30738|
+---+--------------------+----+-----+
I am trying to get the previous row's zip where code = 0, within a time window.
This is my attempt, but you can see that the row where code is 4 gets a value when it should be null, and the row after the 4 is null when it should have a value.
from pyspark.sql.functions import *
from pyspark.sql import functions as F
from pyspark.sql import Window
w = Window.partitionBy('id').orderBy('timestamp').rangeBetween(-60*60*24,-1)
df = df.withColumn("Card_Present_Last_Zip",F.last(F.when(col("code") == '0', col("zip"))).over(w))
+---+--------------------+----+-----+----------+---------------------+
| id| trandatetime|code| zip| timestamp|Card_Present_Last_Zip|
+---+--------------------+----+-----+----------+---------------------+
| 1|2020-02-06T17:33:...| 0|35763|1581010401| null|
| 1|2020-02-06T17:39:...| 0|35763|1581010795| 35763|
| 1|2020-02-07T06:06:...| 0|35741|1581055602| 35763|
| 1|2020-02-07T06:28:...| 4|94043|1581056897| 35741|
| 1|2020-02-07T07:12:...| 0|35802|1581059533| null|
| 1|2020-02-07T08:23:...| 0|30738|1581063809| 35802|
+---+--------------------+----+-----+----------+---------------------+
Put the last expression (with ignorenulls set to True) inside another when clause, so that the window operation is only applied to rows with code = '0':
w = Window.partitionBy('id').orderBy('timestamp').rangeBetween(-60*60*24, -1)
df = (df
    .withColumn("timestamp", F.unix_timestamp("trandatetime"))
    .withColumn(
        "Card_Present_Last_Zip",
        F.when(F.col("code") == '0',
               F.last(F.when(F.col("code") == '0', F.col("zip")), ignorenulls=True).over(w))
    )
)
df.show()
# +---+-------------------+----+-----+----------+---------------------+
# | id| trandatetime|code| zip| timestamp|Card_Present_Last_Zip|
# +---+-------------------+----+-----+----------+---------------------+
# | 1|2020-02-06 17:33:21| 0|35763|1581010401| null|
# | 1|2020-02-06 17:39:55| 0|35763|1581010795| 35763|
# | 1|2020-02-07 06:06:42| 0|35741|1581055602| 35763|
# | 1|2020-02-07 06:28:17| 4|94043|1581056897| null|
# | 1|2020-02-07 07:12:13| 0|35802|1581059533| 35741|
# | 1|2020-02-07 08:23:29| 0|30738|1581063809| 35802|
# +---+-------------------+----+-----+----------+---------------------+
You can use the window function lag():
window_spec = Window.partitionBy('id').orderBy('timestamp')
df.withColumn('prev_zip', lag('zip').over(window_spec)) \
    .withColumn('Card_Present_Last_Zip', when(col('code') == 0, col('prev_zip')).otherwise(None)) \
    .show()
I am trying to concatenate multiple dataframe columns, but I am not able to perform pyspark eval or expr on the when statement below inside concat_ws.
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
from pyspark.sql.functions import concat_ws,concat,when,col,expr
from pyspark.sql.functions import lit
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([("foo", "bar"), ("ba z", None)],
('a', 'b'))
keys = ['a','b']
key_val = ''
for key in keys:
key_val = key_val + 'when(df["{0}"].isNull(), lit("_")).otherwise(df["{0}"]),'.format(key)
key_val_exp = key_val.rsplit(',', 1)[0]
spaceDeleteUDF = udf(lambda s: str(s).replace(" ", "_").strip(), StringType())
df=df.withColumn("unique_id", spaceDeleteUDF(concat_ws("-",eval(key_val_exp))))
Error:
"TypeError: Invalid argument, not a string or column: (Column<b'CASE WHEN (a IS NULL) THEN _ ELSE a END'>, Column<b'CASE WHEN (b IS NULL) THEN _ ELSE b END'>) of type <class 'tuple'>. For column literals, use 'lit', 'array', 'struct' or 'create_map' function."
Expected output:
+----+----+---------+
| a| b|unique_id|
+----+----+---------+
| foo| bar| foo-bar|
|ba z|null| ba_z-_|
+----+----+---------+
Check this out:
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([("foo", "bar"), ("ba z", None)], ('a', 'b'))
df.show()
# +----+----+
# | a| b|
# +----+----+
# | foo| bar|
# |ba z|null|
# +----+----+
df1 = df.select(
    *[F.col(column) for column in df.columns],
    *[F.when(F.col(column).isNull(), F.lit('_')).otherwise(F.col(column)).alias(column + '_mod') for column in df.columns]
)
df2 = df1.select(
    *[F.col(column) for column in df1.columns if '_mod' not in column],
    *[F.regexp_replace(column, r'\s', '_').alias(column) for column in df1.columns if '_mod' in column]
)
df3 = df2.select(
    *[F.col(column) for column in df1.columns if '_mod' not in column],
    F.concat_ws('-', *[F.col(column) for column in df2.columns if '_mod' in column]).alias('unique_id')
)
df3.show()
# +----+----+---------+
# | a| b|unique_id|
# +----+----+---------+
# | foo| bar| foo-bar|
# |ba z|null| ba_z-_|
# +----+----+---------+
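A more compact variant of the same idea (a sketch, not the answer above) builds the when expressions inline and replaces spaces on the concatenated result:
df = df.withColumn(
    "unique_id",
    F.regexp_replace(
        F.concat_ws("-", *[F.when(F.col(c).isNull(), "_").otherwise(F.col(c)) for c in df.columns]),
        r"\s", "_"
    )
)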
I don't understand the behaviour of this simple PySpark code snippet:
# Create simple test dataframe
l = [('Alice', 1),('Pierre', 3),('Jack', 5), ('Paul', 2)]
df_test = sqlcontext.createDataFrame(l, ['name', 'age'])
# Perform filter then Take 2 oldest
df_test = df_test.sort('age', ascending=False) \
    .filter('age < 4') \
    .limit(2)
df_test.show(2)
# This outputs as expected :
# +------+---+
# | name|age|
# +------+---+
# |Pierre| 3|
# | Paul| 2|
# +------+---+
df_test.collect()
# This outputs unexpectedly :
# [Row(name=u'Pierre', age=3), Row(name=u'Alice', age=1)]
Is this an expected behaviour of the collect() function? How can I retrieve my column as a list that keeps the right order?
Thanks
I had to use a sorter UDF to resolve this issue
from pyspark.sql.functions import udf

def sorter(l):
    import operator
    res = sorted(l, key=operator.itemgetter(0))
    L1 = [item[1] for item in res]
    # return " ".join(str(x) for x in L1)
    return "".join(L1)

sort_udf = udf(sorter)
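A minimal usage sketch (the column names id, order_col and value_col are hypothetical, and value_col is assumed to contain strings): collect (order, value) structs per group, then let the UDF sort by the first field and concatenate the second:
from pyspark.sql import functions as F

grouped = (df
    .groupBy('id')
    .agg(F.collect_list(F.struct('order_col', 'value_col')).alias('pairs')))
result = grouped.withColumn('sorted_values', sort_udf('pairs'))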