PySpark 1.6.2 | collect() after orderBy/sort - apache-spark

I don't understand the behaviour of this simple PySpark code snippet:
# Create a simple test dataframe
l = [('Alice', 1), ('Pierre', 3), ('Jack', 5), ('Paul', 2)]
df_test = sqlcontext.createDataFrame(l, ['name', 'age'])
# Sort by age (descending), filter out ages >= 4, then take the 2 oldest
df_test = df_test.sort('age', ascending=False) \
    .filter('age < 4') \
    .limit(2)
df_test.show(2)
# This outputs as expected:
# +------+---+
# | name|age|
# +------+---+
# |Pierre| 3|
# | Paul| 2|
# +------+---+
df_test.collect()
# This outputs unexpectedly:
# [Row(name=u'Pierre', age=3), Row(name=u'Alice', age=1)]
Is this the expected behaviour of the collect() function? How can I retrieve my column as a list that keeps the right order?
Thanks
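One workaround sketch (an assumption on my part, not a confirmed explanation of the behaviour): apply the filter first and keep the sort as the last transformation before limit, so the order you request is also the order limit and collect() see:
df_sorted_last = sqlcontext.createDataFrame(l, ['name', 'age']) \
    .filter('age < 4') \
    .sort('age', ascending=False) \
    .limit(2)
df_sorted_last.collect()
# should give: [Row(name=u'Pierre', age=3), Row(name=u'Paul', age=2)]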

I had to use a sorter UDF to resolve this issue:
def sorter(l):
    import operator
    res = sorted(l, key=operator.itemgetter(0))
    L1 = [item[1] for item in res]
    # return " ".join(str(x) for x in L1)
    return "".join(L1)

from pyspark.sql.functions import udf
sort_udf = udf(sorter)
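A hedged guess at how the UDF above is meant to be used (the grouping column group_col and the collect_list/struct wiring are assumptions, not part of the original snippet): collect (age, name) pairs per group, let the UDF sort them by the first struct field, and concatenate the names:
from pyspark.sql.functions import collect_list, struct
(df_test
 .groupBy('group_col')  # hypothetical grouping column
 .agg(collect_list(struct('age', 'name')).alias('pairs'))
 .withColumn('names_sorted_by_age', sort_udf('pairs'))
 .show())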

Related

How to translate SQL UPDATE query which uses inner join into PySpark?

I have two MS Access SQL queries which I want to convert into PySpark. The queries look like this (we have two tables Employee and Department):
UPDATE EMPLOYEE INNER JOIN [DEPARTMENT] ON
EMPLOYEE.STATEPROVINCE = [DEPARTMENT].[STATE_LEVEL]
SET EMPLOYEE.STATEPROVINCE = [DEPARTMENT]![STATE_ABBREVIATION];
UPDATE EMPLOYEE INNER JOIN [DEPARTMENT] ON
EMPLOYEE.STATEPROVINCE = [DEPARTMENT].[STATE_LEVEL]
SET EMPLOYEE.MARKET = [DEPARTMENT]![MARKET];
Test dataframes:
from pyspark.sql import functions as F
df_emp = spark.createDataFrame([(1, 'a'), (2, 'bb')], ['EMPLOYEE', 'STATEPROVINCE'])
df_emp.show()
# +--------+-------------+
# |EMPLOYEE|STATEPROVINCE|
# +--------+-------------+
# | 1| a|
# | 2| bb|
# +--------+-------------+
df_dept = spark.createDataFrame([('bb', 'b')], ['STATE_LEVEL', 'STATE_ABBREVIATION'])
df_dept.show()
# +-----------+------------------+
# |STATE_LEVEL|STATE_ABBREVIATION|
# +-----------+------------------+
# | bb| b|
# +-----------+------------------+
Running your SQL query in Microsoft Access replaces EMPLOYEE.STATEPROVINCE with the matching DEPARTMENT.STATE_ABBREVIATION wherever the join condition is met. In PySpark, you can get the same result like this:
df = (df_emp.alias('a')
      .join(df_dept.alias('b'), df_emp.STATEPROVINCE == df_dept.STATE_LEVEL, 'left')
      .select(
          *[c for c in df_emp.columns if c != 'STATEPROVINCE'],
          F.coalesce('b.STATE_ABBREVIATION', 'a.STATEPROVINCE').alias('STATEPROVINCE')
      )
)
df.show()
# +--------+-------------+
# |EMPLOYEE|STATEPROVINCE|
# +--------+-------------+
# | 1| a|
# | 2| b|
# +--------+-------------+
First you do a left join, then a select. The select has two parts.
First, you select everything from df_emp except for "STATEPROVINCE".
Then, for the new "STATEPROVINCE", you select "STATE_ABBREVIATION" from df_dept, but in case it's null (i.e. there is no matching row in df_dept), you fall back to "STATEPROVINCE" from df_emp.
For your second query, you only need to change the columns in the select statement:
df = (df_emp.alias('a')
      .join(df_dept.alias('b'), df_emp.STATEPROVINCE == df_dept.STATE_LEVEL, 'left')
      .select(
          *[c for c in df_emp.columns if c != 'MARKET'],
          F.coalesce('b.MARKET', 'a.MARKET').alias('MARKET')
      )
)
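Since both queries join EMPLOYEE to DEPARTMENT on the same condition, a single join can also produce both updated columns in one pass. This is only a sketch: it assumes both df_emp and df_dept carry a MARKET column, as in the Access queries (the small test dataframes above do not):
df = (df_emp.alias('a')
      .join(df_dept.alias('b'), df_emp.STATEPROVINCE == df_dept.STATE_LEVEL, 'left')
      .select(
          *[c for c in df_emp.columns if c not in ('STATEPROVINCE', 'MARKET')],
          F.coalesce('b.STATE_ABBREVIATION', 'a.STATEPROVINCE').alias('STATEPROVINCE'),
          F.coalesce('b.MARKET', 'a.MARKET').alias('MARKET')
      )
)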

Find specific word in input file and read the data from next row in PySpark

Input File:
32535
1243
1q332|2
EOH
CUST_ID|CUST_NAME|ORDER_NO|ORDER_ITEM
1|TAM|222|ORANGE
2|AAM|322|APPLE
Output:
CUST_ID|CUST_NAME|ORDER_NO|ORDER_ITEM
1|TAM|222|ORANGE
2|AAM|322|APPLE
The input and output are shown above. I want to read the input file, find the 'EOH' marker, and build a DataFrame from the lines that follow it; the rows before 'EOH' should be ignored. The output format is given above.
Sometimes a few extra rows may be added before 'EOH', so the split needs to be driven by the 'EOH' marker itself.
Please share PySpark code.
I don't know if this is the best approach, but here it is:
from pyspark.sql.window import Window
import pyspark.sql.functions as f

df = (spark
      .read
      .format('csv')
      .option('delimiter', '|')
      .schema('CUST_ID string, CUST_NAME string, ORDER_NO integer, ORDER_ITEM string')
      .load(YOUR_PATH))

# Identify which line is the header (the line right after 'EOH')
df = (df
      .withColumn('id', f.monotonically_increasing_id())
      .withColumn('header', f.lag('CUST_ID', default=False).over(Window.orderBy('id')) == f.lit('EOH')))

# Collect only the header row to the Python context
header = df.where(f.col('header')).head()

# Remove all rows up to and including the header row
df = (df
      .where(f.col('id') > f.lit(header.id))
      .drop('id', 'header'))

df.show()
Output:
+-------+---------+--------+----------+
|CUST_ID|CUST_NAME|ORDER_NO|ORDER_ITEM|
+-------+---------+--------+----------+
| 1| TAM| 222| ORANGE|
| 2| AAM| 322| APPLE|
+-------+---------+--------+----------+
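An alternative sketch using the RDD API (my own variation, not part of the answer above): index every line, locate the 'EOH' marker, and only parse what comes after it. All columns come back as strings unless you cast them afterwards:
lines = spark.sparkContext.textFile(YOUR_PATH).zipWithIndex()
# index of the 'EOH' marker line
eoh_idx = lines.filter(lambda x: x[0].strip() == 'EOH').first()[1]
# keep only lines after the marker and split them on '|'
data = lines.filter(lambda x: x[1] > eoh_idx).map(lambda x: x[0].split('|'))
# the first remaining line is the header, the rest are data rows
header = data.first()
df = data.filter(lambda row: row != header).toDF(header)
df.show()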
If the schema is fixed, as in the comment, you can pass it into from_csv:
schema = """
CUST_ID INT,
CUST_NAME STRING,
ORDER_NO INT,
ORDER_ITEM STRING
"""
from pyspark.sql import functions as F

# df is assumed to be the raw file read as a single text column named 'value',
# e.g. df = spark.read.text(YOUR_PATH)

# if you know for sure all fields are not null then
(df
 .withColumn('value', F.from_csv('value', schema, {'sep': '|'}))
 .select('value.*')
 .where(
     F.col('CUST_ID').isNotNull() &
     F.col('CUST_NAME').isNotNull() &
     F.col('ORDER_NO').isNotNull() &
     F.col('ORDER_ITEM').isNotNull()
 )
 .show(10, False)
)

# if you are unsure about the nulls, you can filter them out before parsing
# (or there are many other options)
(df
 .withColumn('tmp', F.size(F.split('value', r'\|')))
 .where((F.col('tmp') == 4) & (~F.col('value').startswith('CUST_ID')))
 .withColumn('value', F.from_csv('value', schema, {'sep': '|'}))
 .select('value.*')
 .show(10, False)
)
# +-------+---------+--------+----------+
# |CUST_ID|CUST_NAME|ORDER_NO|ORDER_ITEM|
# +-------+---------+--------+----------+
# |1 |TAM |222 |ORANGE |
# |2 |AAM |322 |APPLE |
# +-------+---------+--------+----------+

Getting the table name from a Spark Dataframe

If I have a dataframe created as follows:
df = spark.table("tblName")
Is there any way that I can get tblName back from df?
You can extract it from the plan:
df.logicalPlan().argString().replace("`","")
We can extract the table name from a DataFrame by parsing its unresolved logical plan.
Please follow the method below:
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.catalyst.catalog.{CatalogTable, HiveTableRelation}
import org.apache.spark.sql.execution.datasources.LogicalRelation

def getTableName(df: DataFrame): String = {
  Seq(df.queryExecution.logical, df.queryExecution.optimizedPlan).flatMap {
    _.collect {
      case LogicalRelation(_, _, catalogTable: Option[CatalogTable], _) =>
        if (catalogTable.isDefined) Some(catalogTable.get.identifier.toString()) else None
      case hive: HiveTableRelation => Some(hive.tableMeta.identifier.toString())
    }
  }.flatten.head
}

scala> val df = spark.table("db.table")
scala> getTableName(df)
res: String = `db`.`table`
The following utility function may be helpful to determine the table name from a given DataFrame.
import re
import typing

import pyspark.sql


def get_dataframe_tablename(df: pyspark.sql.DataFrame) -> typing.Optional[str]:
    """
    If the dataframe was created from an underlying table (e.g. spark.table('dual') or
    spark.sql("select * from dual")), this function will return the
    fully qualified table name (e.g. `default`.`dual`) as output, otherwise it will return None.

    Tested on: Python 3.7, Spark 3.0.1, but it should work with Spark >= 2.x and Python >= 3.4 too.

    Examples:
        >>> get_dataframe_tablename(spark.table('dual'))
        `default`.`dual`
        >>> get_dataframe_tablename(spark.sql("select * from dual"))
        `default`.`dual`

    It inspects the output of `df.explain()` to determine whether the df was created from a table or not.

    :param df: input dataframe whose underlying table name will be returned
    :return: table name or None
    """
    def _explain(_df: pyspark.sql.DataFrame) -> str:
        # df.explain() does not take a parameter to redirect its output;
        # it dumps the plan on stdout by default
        import contextlib
        import io
        with contextlib.redirect_stdout(io.StringIO()) as f:
            _df.explain()
        f.seek(0)  # Rewind stream position
        explanation = f.readlines()[1]  # Ignore the first output line (== Physical Plan ==)
        return explanation

    pattern = re.compile("Scan hive (.+), HiveTableRelation (.+?), (.+)")
    output = _explain(df)
    match = pattern.search(output)
    return match.group(2) if match else None
The three lines of code below will give the table and database name:
import org.apache.spark.sql.execution.FileSourceScanExec

val df = session.table("dealer")
df.queryExecution.sparkPlan.asInstanceOf[FileSourceScanExec].tableIdentifier
Any answer on this one yet? I found a way, but it's probably not the prettiest. You can access the table name by retrieving the physical execution plan and then doing some string-splitting magic on it.
Let's say you have a table from database_name.tblName. The following should work:
execution_plan = df._jdf.queryExecution().simpleString()
table_name = execution_plan.split('FileScan')[1].split('[')[0].split('.')[1]
The first line will return your execution plan in a string format. That will look similar to this:
== Physical Plan ==\n*(1) ColumnarToRow\n+- FileScan parquet database_name.tblName[column1#2880,column2ban#2881] Batched: true, DataFilters: [], Format: Parquet, Location: PreparedDeltaFileIndex[dbfs:/mnt/lake/database_name/table_name], PartitionFilters: [], PushedFilters: [], ReadSchema: struct<column1:string,column2:string...\n\n'
After that you can do some string splitting to extract the relevant information. Splitting on 'FileScan' and taking the second element gives you the chunk of the plan that names the table; splitting that on '[' and keeping the first element leaves the qualified name; the final split on '.' returns tblName.
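A slightly less brittle variant (a sketch; it still assumes a 'FileScan' entry shows up in the plan, which holds for file-based sources such as Parquet) pulls the qualified name out with a regular expression instead of chained splits:
import re
execution_plan = df._jdf.queryExecution().simpleString()
match = re.search(r'FileScan \w+ ([\w.]+)\[', execution_plan)
table_name = match.group(1).split('.')[-1] if match else None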
You can create a table from df. But if the table is a local or global temporary view, you should drop it (sqlContext.dropTempTable) before creating a table with the same name, or use the create-or-replace variants (df.createOrReplaceGlobalTempView or df.createOrReplaceTempView). If the table was registered with registerTempTable, you can register a table with the same name again without an error.
#Create data frame
>>> d = [('Alice', 1)]
>>> test_df = spark.createDataFrame(sc.parallelize(d), ['name','age'])
>>> test_df.show()
+-----+---+
| name|age|
+-----+---+
|Alice| 1|
+-----+---+
#create tables
>>> test_df.createTempView("tbl1")
>>> test_df.registerTempTable("tbl2")
>>> sqlContext.tables().show()
+--------+---------+-----------+
|database|tableName|isTemporary|
+--------+---------+-----------+
| | tbl1| true|
| | tbl2| true|
+--------+---------+-----------+
#create data frame from tbl1
>>> df = spark.table("tbl1")
>>> df.show()
+-----+---+
| name|age|
+-----+---+
|Alice| 1|
+-----+---+
#create tbl1 again using the df data frame. It will raise an error
>>> df.createTempView("tbl1")
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
pyspark.sql.utils.AnalysisException: "Temporary view 'tbl1' already exists;"
#drop and create again
>>> sqlContext.dropTempTable('tbl1')
>>> df.createTempView("tbl1")
>>> spark.sql('select * from tbl1').show()
+-----+---+
| name|age|
+-----+---+
|Alice| 1|
+-----+---+
#create data frame from tbl2 and replace name value
>>> df = spark.table("tbl2")
>>> df = df.replace('Alice', 'Bob')
>>> df.show()
+----+---+
|name|age|
+----+---+
| Bob| 1|
+----+---+
#create tbl2 again using the df data frame
>>> df.registerTempTable("tbl2")
>>> spark.sql('select * from tbl2').show()
+----+---+
|name|age|
+----+---+
| Bob| 1|
+----+---+

Removing NULL, NaN, empty space from PySpark DataFrame

I have a dataframe in PySpark which contains empty strings, NULL, and NaN.
I want to remove rows which have any of those. I tried the commands below, but nothing seems to work.
myDF.na.drop().show()
myDF.na.drop(how='any').show()
Below is the dataframe:
+---+----------+----------+-----+-----+
|age| category| date|empId| name|
+---+----------+----------+-----+-----+
| 25|electronic|17-01-2018| 101| abc|
| 24| sports|16-01-2018| 102| def|
| 23|electronic|17-01-2018| 103| hhh|
| 23|electronic|16-01-2018| 104| yyy|
| 29| men|12-01-2018| 105| ajay|
| 31| kids|17-01-2018| 106|vijay|
| | Men| nan| 107|Sumit|
+---+----------+----------+-----+-----+
What am I missing? What is the best way to tackle NULL, NaN or empty strings so that there is no problem in the actual calculation?
NaN (not a number) has a different meaning than NULL, and an empty string is just a normal value (it can be converted to NULL automatically by the csv reader), so na.drop won't match these.
You can convert them all to null and then drop:
from pyspark.sql.functions import col, isnan, when, trim

df = spark.createDataFrame([
    ("", 1, 2.0), ("foo", None, 3.0), ("bar", 1, float("NaN")),
    ("good", 42, 42.0)])

def to_null(c):
    return when(~(col(c).isNull() | isnan(col(c)) | (trim(col(c)) == "")), col(c))

df.select([to_null(c).alias(c) for c in df.columns]).na.drop().show()
# +----+---+----+
# | _1| _2| _3|
# +----+---+----+
# |good| 42|42.0|
# +----+---+----+
Maybe in your case it is not important, but this code (a modified version of Alper t. Turker's answer) can handle different data types accordingly. The data types can vary according to your DataFrame, of course. (Tested on Spark 2.4.)
from pyspark.sql.functions import col, isnan, when, trim

# Find out the dataType and act accordingly
def to_null_bool(c, dt):
    if dt == "double":
        return ~(c.isNull() | isnan(c))
    elif dt == "string":
        return ~c.isNull() & (trim(c) != "")
    else:
        return ~c.isNull()

# Only keep values that are neither null/NaN nor empty strings
def to_null(c, dt):
    c = col(c)
    return when(to_null_bool(c, dt), c)

df.select([to_null(c, dt[1]).alias(c) for c, dt in zip(df.columns, df.dtypes)]).na.drop(how="any").show()
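Applied to the sample dataframe from the previous answer, this should again keep only the fully populated row (a quick check, reusing the same data):
df = spark.createDataFrame([
    ("", 1, 2.0), ("foo", None, 3.0), ("bar", 1, float("NaN")),
    ("good", 42, 42.0)])
df.select([to_null(c, dt[1]).alias(c) for c, dt in zip(df.columns, df.dtypes)]).na.drop(how="any").show()
# +----+---+----+
# |  _1| _2|  _3|
# +----+---+----+
# |good| 42|42.0|
# +----+---+----+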

How to detect null column in pyspark

I have a dataframe defined with some null values, and some columns are entirely null.
>> df.show()
+---+---+---+----+
| A| B| C| D|
+---+---+---+----+
|1.0|4.0|7.0|null|
|2.0|5.0|7.0|null|
|3.0|6.0|5.0|null|
+---+---+---+----+
In my case, I want to return a list of the column names that are entirely filled with null values. My idea was to detect the constant columns (since the whole column contains the same null value).
This is how I did it:
nullColumns = [c for c, const in df.select([(min(c) == max(c)).alias(c) for c in df.columns]).first().asDict().items() if const]
but this does not consider null columns as constant; it only works with actual values.
How should I do it then?
Extend the condition to
from pyspark.sql.functions import min, max
((min(c).isNull() & max(c).isNull()) | (min(c) == max(c))).alias(c)
or use eqNullSafe (PySpark 2.3):
(min(c).eqNullSafe(max(c))).alias(c)
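Plugging either condition back into your original one-pass approach should then flag the all-null column as well (a sketch using the extended condition; note it still also flags constant non-null columns, exactly like your original code):
from pyspark.sql.functions import min, max
flags = df.select(
    [((min(c).isNull() & max(c).isNull()) | (min(c) == max(c))).alias(c) for c in df.columns]
).first()
nullColumns = [c for c, const in flags.asDict().items() if const]
# with the example dataframe above: ['D']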
One way would be to do it implicitly: select each column, count its NULL values, and then compare this with the total number of rows. With your data, this would be:
spark.version
# u'2.2.0'
from pyspark.sql.functions import col
nullColumns = []
numRows = df.count()
for k in df.columns:
    nullRows = df.where(col(k).isNull()).count()
    if nullRows == numRows:  # i.e. if ALL values are NULL
        nullColumns.append(k)
nullColumns
# ['D']
But there is a simpler way: it turns out that the function countDistinct, when applied to a column with all NULL values, returns zero (0):
from pyspark.sql.functions import countDistinct
df.agg(countDistinct(df.D).alias('distinct')).collect()
# [Row(distinct=0)]
So the for loop now can be:
nullColumns = []
for k in df.columns:
    if df.agg(countDistinct(df[k])).collect()[0][0] == 0:
        nullColumns.append(k)
nullColumns
# ['D']
UPDATE (after comments): It seems possible to avoid collect in the second solution; since df.agg returns a dataframe with only one row, replacing collect with take(1) will safely do the job:
nullColumns = []
for k in df.columns:
    if df.agg(countDistinct(df[k])).take(1)[0][0] == 0:
        nullColumns.append(k)
nullColumns
# ['D']
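A further variant (a sketch): the per-column jobs can be folded into a single aggregation, so Spark scans the data once instead of once per column:
from pyspark.sql.functions import countDistinct
counts = df.agg(*[countDistinct(df[c]).alias(c) for c in df.columns]).take(1)[0]
nullColumns = [c for c in df.columns if counts[c] == 0]
nullColumns
# ['D']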
How about this? In order to guarantee that a column is all nulls, two properties must be satisfied:
(1) The min value is equal to the max value
(2) The min or max is null
Or, equivalently:
(1) The min AND max are both equal to None
Note that if property (2) is not required, the case where the column values are [null, 1, null, 1] would be incorrectly reported, since the min and max would both be 1.
import pyspark.sql.functions as F
def get_null_column_names(df):
    column_names = []
    for col_name in df.columns:
        min_ = df.select(F.min(col_name)).first()[0]
        max_ = df.select(F.max(col_name)).first()[0]
        if min_ is None and max_ is None:
            column_names.append(col_name)
    return column_names
Here's an example in practice:
>>> rows = [(None, 18, None, None),
...         (1, None, None, None),
...         (1, 9, 4.0, None),
...         (None, 0, 0., None)]
>>> schema = "a: int, b: int, c: float, d:int"
>>> df = spark.createDataFrame(data=rows, schema=schema)
>>> df.show()
+----+----+----+----+
| a| b| c| d|
+----+----+----+----+
|null| 18|null|null|
| 1|null|null|null|
| 1| 9| 4.0|null|
|null| 0| 0.0|null|
+----+----+----+----+
>>> get_null_column_names(df)
['d']
