How to use chaining in pyspark? - python-3.x

I have a dataframe called Incito, and the Supplier Inv No column of that dataframe consists of comma-separated values. I need to recreate the dataframe by appropriately repeating those comma-separated values using PySpark. I am using the following Python code for that. Can I convert this into PySpark? Is it possible via PySpark?
from itertools import chain

def chainer(s):
    return list(chain.from_iterable(s.str.split(',')))

incito['Supplier Inv No'] = incito['Supplier Inv No'].astype(str)

# calculate lengths of splits
lens = incito['Supplier Inv No'].str.split(',').map(len)

# create new dataframe, repeating or chaining as appropriate
dfnew = pd.DataFrame({'Supplier Inv No': chainer(incito['Supplier Inv No']),
                      'Forwarder': np.repeat(incito['Forwarder'], lens),
                      'Mode': np.repeat(incito['Mode'], lens),
                      'File No': np.repeat(incito['File No'], lens),
                      'ETD': np.repeat(incito['ETD'], lens),
                      'Flight No': np.repeat(incito['Flight No'], lens),
                      'Shipped Country': np.repeat(incito['Shipped Country'], lens),
                      'Port': np.repeat(incito['Port'], lens),
                      'Delivered_Country': np.repeat(incito['Delivered_Country'], lens),
                      'AirWeight': np.repeat(incito['AirWeight'], lens),
                      'FREIGHT CHARGE': np.repeat(incito['FREIGHT CHARGE'], lens)})
This is what I tried in PySpark, but I am not getting the expected outcome.
from pyspark.context import SparkContext, SparkConf
from pyspark.sql.session import SparkSession
from pyspark.sql import functions as F
import pandas as pd
conf = SparkConf().setAppName("appName").setMaster("local")
sc = SparkContext(conf=conf)
spark = SparkSession(sc)
ddf = spark.createDataFrame(dfnew)
exploded = ddf.withColumn('d', F.explode("Supplier Inv No"))
exploded.show()

Something like this, using repeat?
from pyspark.sql import functions as F
df = (spark
      .sparkContext
      .parallelize([
          ('ABCD',),
          ('EFGH',),
      ])
      .toDF(['col_a'])
      )

(df
 .withColumn('col_b', F.repeat(F.col('col_a'), 2))
 .withColumn('col_c', F.repeat(F.lit('X'), 10))
 .show()
 )
# +-----+--------+----------+
# |col_a| col_b| col_c|
# +-----+--------+----------+
# | ABCD|ABCDABCD|XXXXXXXXXX|
# | EFGH|EFGHEFGH|XXXXXXXXXX|
# +-----+--------+----------+
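For what the question is actually after, the pandas repeat-and-chain logic usually maps onto split plus explode in PySpark. A minimal sketch, assuming sdf is a Spark DataFrame holding the original (un-exploded) incito data with the same column names as in the question:

from pyspark.sql import functions as F

# Split the comma-separated invoice numbers into an array, then explode the
# array so that every other column is repeated once per invoice number.
exploded = sdf.withColumn(
    'Supplier Inv No',
    F.explode(F.split(F.col('Supplier Inv No'), ','))
)
exploded.show()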

Related

Data type conversion in spark

I have a column id which had type int but was later changed to bigint.
It has both types of values.
from pyspark.sql.functions import *
from pyspark.sql.types import *
df = spark.read.parquet('hdfs path')
df = df.select("id", "code")
df=df.withColumn("id1", df["id"].cast(LongType()))
res1=df.select("id1", "code")
res1.show(1, False)
It shows me the data frame, but when I try to perform some operations on it, for example:
res1.groupBy('code').agg(countDistinct("id1")).show(1, False)
I get Column: [id], Expected: int, Found: INT64
I tried mergeSchema, but that did not work either.
from pyspark.sql.functions import *
from pyspark.sql.types import *
df1 = spark.read.parquet('hdfs path')
df2 = df1.select("id", "code")
df3 = df2.withColumn("id1", df2["id"].cast(LongType()))
res1=df3.select("id1", "code")
res1.show(1, False)
res1.groupBy("code").agg(countDistinct("id1")).show(1, False)
This should work. In Spark, DataFrames are immutable, so you should not assign the result of a transformation back to the same df variable; use a different variable name. In Scala this would give you a compile-time error, but in Python it is allowed, so you may not notice it.
If you want, you could also chain all of your transformations into a single df variable and perform the groupBy operation on it, as below:
df = (spark.read.parquet('hdfs path')
      .select("id", "code")
      .withColumn("id1", col("id").cast(LongType()))
      .select("id1", "code"))
df.groupBy("code").agg(countDistinct("id1")).show(1, False)

Spark - convert JSON array object to array of string

As part of my dataframe, one of the columns has data in the following manner:
[{"text":"Tea"},{"text":"GoldenGlobes"}]
And I want to convert that to just an array of strings:
["Tea", "GoldenGlobes"]
Would someone please let me know how to do this?
See the example below without udf:
import pyspark.sql.functions as f
from pyspark.sql import Row
from pyspark.shell import spark
from pyspark.sql.types import ArrayType, StructType, StructField, StringType
df = spark.createDataFrame([
    Row(values='[{"text":"Tea"},{"text":"GoldenGlobes"}]'),
    Row(values='[{"text":"GoldenGlobes"}]')
])

schema = ArrayType(StructType([
    StructField('text', StringType())
]))

df \
    .withColumn('array_of_str', f.from_json(f.col('values'), schema).text) \
    .show()
Output:
+--------------------+-------------------+
| values| array_of_str|
+--------------------+-------------------+
|[{"text":"Tea"},{...|[Tea, GoldenGlobes]|
|[{"text":"GoldenG...| [GoldenGlobes]|
+--------------------+-------------------+
If the type of your column is array then something like this should work (not tested):
from pyspark.sql import functions as F
from pyspark.sql import types as T
c = F.array([F.get_json_object(F.col("colname")[0], '$.text'),
             F.get_json_object(F.col("colname")[1], '$.text')])
df = df.withColumn("new_col", c)
Or, if the length is not fixed (I do not see a solution without a udf):
@F.udf(T.ArrayType(T.StringType()))
def get_list(x):
    o_list = []
    for elt in x:
        o_list.append(elt["text"])
    return o_list

df = df.withColumn("new_col", get_list("colname"))
Sharing the Java syntax:
import static org.apache.spark.sql.functions.from_json;
import static org.apache.spark.sql.functions.get_json_object;
import static org.apache.spark.sql.functions.col;
import static org.apache.spark.sql.types.DataTypes.StringType;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.types.ArrayType;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

Dataset<Row> df = getYourDf();

StructType structschema =
        DataTypes.createStructType(
                new StructField[] {
                        DataTypes.createStructField("text", StringType, true)
                });

ArrayType schema = new ArrayType(structschema, true);

df = df.withColumn("array_of_str", from_json(col("colname"), schema).getField("text"));

PySpark: Search For substrings in text and subset dataframe

I am brand new to pyspark and want to translate my existing pandas / python code to PySpark.
I want to subset my dataframe so that only rows whose 'original_problem' field contains the specific keywords I'm looking for are returned.
Below is the Python code I tried in PySpark:
def pilot_discrep(input_file):
    df = input_file
    searchfor = ['cat', 'dog', 'frog', 'fleece']
    df = df[df['original_problem'].str.contains('|'.join(searchfor))]
    return df
When I try to run the above, I get the following error:
AnalysisException: u"Can't extract value from original_problem#207:
need struct type but got string;"
In pyspark, try this:
df = df[df['original_problem'].rlike('|'.join(searchfor))]
Or equivalently:
import pyspark.sql.functions as F
df.where(F.col('original_problem').rlike('|'.join(searchfor)))
Alternatively, you could go for a udf:
import pyspark.sql.functions as F
searchfor = ['cat', 'dog', 'frog', 'fleece']
check_udf = F.udf(lambda x: x if x in searchfor else 'Not_present')
df = df.withColumn('check_presence', check_udf(F.col('original_problem')))
df = df.filter(df.check_presence != 'Not_present').drop('check_presence')
But the DataFrame methods are preferred because they will be faster.
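If the keywords should be matched as literal substrings rather than as a regular expression, a small variation using Column.contains avoids regex-escaping issues (a sketch, reusing the searchfor list from the question):

from functools import reduce
import pyspark.sql.functions as F

searchfor = ['cat', 'dog', 'frog', 'fleece']

# OR together one literal-substring test per keyword
cond = reduce(lambda a, b: a | b,
              [F.col('original_problem').contains(kw) for kw in searchfor])
df = df.filter(cond)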

Spark order by second field to perform timeseries function

I have a csv with a timeseries:
timestamp, measure-name, value, type, quality
1503377580,x.x-2.A,0.5281250,Float,GOOD
1503377340,x.x-1.B,0.0000000,Float,GOOD
1503377400,x.x-1.B,0.0000000,Float,GOOD
The measure-name should be my partition key, and I would like to calculate a moving average with PySpark. Here is my code (for instance) to calculate the max:
def mysplit(line):
    ll = line.split(",")
    return (ll[1], float(ll[2]))

text_file.map(lambda line: mysplit(line)).reduceByKey(lambda a, b: max(a, b)).foreach(print)
However, for the average I would like to respect the timestamp ordering.
How to order by a second column?
You need to use a window function on pyspark dataframes:
First you should transform your rdd to a dataframe:
from pyspark.sql import HiveContext
hc = HiveContext(sc)
df = hc.createDataFrame(text_file.map(lambda l: l.split(',')), ['timestamp', 'measure-name', 'value', 'type', 'quality'])
Or load it directly as a dataframe:
local:
import pandas as pd
df = hc.createDataFrame(pd.read_csv(path_to_csv, sep=",", header=0))
from hdfs:
df = hc.read.format("com.databricks.spark.csv").option("delimiter", ",").load(path_to_csv)
Then use a window function:
from pyspark.sql import Window
import pyspark.sql.functions as psf
w = Window.orderBy('timestamp')
df.withColumn('value_rol_mean', psf.mean('value').over(w))
+----------+------------+--------+-----+-------+-------------------+
| timestamp|measure_name| value| type|quality| value_rol_mean|
+----------+------------+--------+-----+-------+-------------------+
|1503377340| x.x-1.B| 0.0|Float| GOOD| 0.0|
|1503377400| x.x-1.B| 0.0|Float| GOOD| 0.0|
|1503377580| x.x-2.A|0.528125|Float| GOOD|0.17604166666666665|
+----------+------------+--------+-----+-------+-------------------+
In .orderBy you can order by as many columns as you want.
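Since the question wants measure-name as the partition key and a moving average rather than a global one, the window would typically also get a partitionBy and a row frame. A sketch, assuming the df built above and an arbitrary three-row trailing window:

from pyspark.sql import Window
import pyspark.sql.functions as psf

# Average over the current row and the two preceding rows of the same measure,
# ordered by time.
w = (Window.partitionBy('measure-name')
     .orderBy('timestamp')
     .rowsBetween(-2, Window.currentRow))
df = df.withColumn('value_rol_mean', psf.mean('value').over(w))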

Error when converting from pyspark RDD to DataFrame: Cannot infer schema of type 'unicode' [duplicate]

Could someone help me solve this problem I have with Spark DataFrame?
When I do myFloatRDD.toDF() I get an error:
TypeError: Can not infer schema for type: <type 'float'>
I don't understand why...
Example:
myFloatRdd = sc.parallelize([1.0,2.0,3.0])
df = myFloatRdd.toDF()
Thanks
SparkSession.createDataFrame, which is used under the hood, requires an RDD / list of Row/tuple/list/dict* or pandas.DataFrame, unless schema with DataType is provided. Try to convert float to tuple like this:
myFloatRdd.map(lambda x: (x, )).toDF()
or even better:
from pyspark.sql import Row
row = Row("val") # Or some other column name
myFloatRdd.map(row).toDF()
To create a DataFrame from a list of scalars you'll have to use SparkSession.createDataFrame directly and provide a schema***:
from pyspark.sql.types import FloatType
df = spark.createDataFrame([1.0, 2.0, 3.0], FloatType())
df.show()
## +-----+
## |value|
## +-----+
## | 1.0|
## | 2.0|
## | 3.0|
## +-----+
but for a simple range it would be better to use SparkSession.range:
from pyspark.sql.functions import col
spark.range(1, 4).select(col("id").cast("double"))
* No longer supported.
** Spark SQL also provides a limited support for schema inference on Python objects exposing __dict__.
*** Supported only in Spark 2.0 or later.
from pyspark.sql.types import IntegerType, Row
mylist = [1, 2, 3, 4, None ]
l = map(lambda x : Row(x), mylist)
# notice the parens after the type name
df=spark.createDataFrame(l,["id"])
df.where(df.id.isNull() == False).show()
Basically, you need to wrap each int in a Row(); then you can use the schema.
Inferring the Schema Using Reflection
from pyspark.sql import Row
# spark - sparkSession
sc = spark.sparkContext
# Load a text file and convert each line to a Row.
orders = sc.textFile("/practicedata/orders")
#Split on delimiters
parts = orders.map(lambda l: l.split(","))
#Convert to Row
orders_struct = parts.map(lambda p: Row(order_id=int(p[0]), order_date=p[1], customer_id=p[2], order_status=p[3]))
for i in orders_struct.take(5): print(i)
#convert the RDD to DataFrame
orders_df = spark.createDataFrame(orders_struct)
Programmatically Specifying the Schema
from pyspark.sql import Row
from pyspark.sql.types import StructType, StructField, StringType
# spark - sparkSession
sc = spark.sparkContext
# Load a text file and convert each line to a Row.
orders = sc.textFile("/practicedata/orders")
#Split on delimiters
parts = orders.map(lambda l: l.split(","))
#Convert to tuple
orders_struct = parts.map(lambda p: (p[0], p[1], p[2], p[3].strip()))
#convert the RDD to DataFrame
orders_df = spark.createDataFrame(orders_struct)
# The schema is encoded in a string.
schemaString = "order_id order_date customer_id status"
fields = [StructField(field_name, StringType(), True) for field_name in schemaString.split()]
schema = StructType(fields)
ordersDf = spark.createDataFrame(orders_struct, schema)
from pyspark.sql import Row
myFloatRdd.map(lambda x: Row(x)).toDF()
