Dataframe TypeError cannot accept object - apache-spark

I have a list of strings in Python as follows:
['start_column=column123;to_3=2020-09-07 10:29:24;to_1=2020-09-07 10:31:08;to_0=2020-09-07 10:31:13;',
'start_column=column475;to_3=2020-09-07 10:29:34;']
I am trying to convert it into a dataframe in the following way:
from pyspark.sql.types import StructType, StructField, ArrayType, StringType

schema = StructType([
    StructField('Rows', ArrayType(StringType()), True)
])
rdd = sc.parallelize(test_list)
query_data = spark.createDataFrame(rdd,schema)
print(query_data.schema)
query_data.show()
I am getting the following error:
TypeError: StructType can not accept object

You can pass the list wrapped in another list while creating the dataframe, as below, so the two strings become the two columns of a single row:
a_list = ['start_column=column123;to_3=2020-09-07 10:29:24;to_1=2020-09-07 10:31:08;to_0=2020-09-07 10:31:13;',
'start_column=column475;to_3=2020-09-07 10:29:34;']
sparkdf = spark.createDataFrame([a_list],["col1", "col2"])
sparkdf.show(truncate=False)
+--------------------------------------------------------------------------------------------------+------------------------------------------------+
|col1 |col2 |
+--------------------------------------------------------------------------------------------------+------------------------------------------------+
|start_column=column123;to_3=2020-09-07 10:29:24;to_1=2020-09-07 10:31:08;to_0=2020-09-07 10:31:13;|start_column=column475;to_3=2020-09-07 10:29:34;|
+--------------------------------------------------------------------------------------------------+------------------------------------------------+

You should use schema = StringType() because your rows contain plain strings rather than structs of strings.
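A minimal sketch of that approach (assuming the SparkSession is available as spark and the input list is named test_list, as in the question):
from pyspark.sql.types import StringType

test_list = [
    'start_column=column123;to_3=2020-09-07 10:29:24;to_1=2020-09-07 10:31:08;to_0=2020-09-07 10:31:13;',
    'start_column=column475;to_3=2020-09-07 10:29:34;',
]

# Each list element becomes one row in a single StringType column named "value".
df = spark.createDataFrame(test_list, StringType())
df.show(truncate=False)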

I have two possible solutions for you.
SOLUTION 1: Assuming you wanted a dataframe with just one row
I was able to make it work by wrapping the values of test_list in a tuple (parentheses) and using StringType fields.
v = [('start_column=column123;to_3=2020-09-07 10:29:24;to_1=2020-09-07 10:31:08;to_0=2020-09-07 10:31:13;',
'start_column=column475;to_3=2020-09-07 10:29:34;')]
from pyspark.sql.types import StructType, StructField, StringType

schema = StructType([
    StructField('col_1', StringType(), True),
    StructField('col_2', StringType(), True),
])
rdd = sc.parallelize(v)
query_data = spark.createDataFrame(rdd,schema)
print(query_data.schema)
query_data.show(truncate = False)
SOLUTION 2: Assuming you wanted a dataframe with just one column
v = ['start_column=column123;to_3=2020-09-07 10:29:24;to_1=2020-09-07 10:31:08;to_0=2020-09-07 10:31:13;',
'start_column=column475;to_3=2020-09-07 10:29:34;']
from pyspark.sql.types import StringType
df = spark.createDataFrame(v, StringType())
df.show(truncate = False)
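In Solution 2 the single column produced by StringType() is named value by default; a hedged one-liner to rename it (the name raw_row is arbitrary):
df = spark.createDataFrame(v, StringType()).toDF("raw_row")  # rename the default "value" column
df.show(truncate=False)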

Related

Access accumulator value after using it in user defined function within df.withColumn in Palantir Foundry

I am trying to use a custom accumulator within Palantir Foundry to aggregate data in a user-defined function that is applied to each row of a dataframe via df.withColumn(...).
In the resulting dataframe I can see that the accumulator value is incremented as expected. However, the value of the accumulator variable itself in the script does not change during execution.
I see that the Python id of the accumulator variable in the script differs from the Python id of the accumulator within the user-defined function, but that might be expected...
How do I access, from the calling script after execution, the accumulator value whose incrementation can be watched in the resulting dataframe column? That is the information I am looking for.
from transforms.api import transform_df, Input, Output
import numpy as np
from pyspark.accumulators import AccumulatorParam
from pyspark.sql.functions import udf, struct
global accum

@transform_df(
    Output("ri.foundry.main.dataset.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"),
)
def compute(ctx):
    from pyspark.sql.types import StructType, StringType, IntegerType, StructField
    data2 = [("James", "", "Smith", "36636", "M", 3000),
             ("Michael", "Rose", "", "40288", "M", 4000),
             ("Robert", "", "Williams", "42114", "M", 4000),
             ("Maria", "Anne", "Jones", "39192", "F", 4000),
             ("Jen", "Mary", "Brown", "", "F", -1)
             ]
    schema = StructType([
        StructField("firstname", StringType(), True),
        StructField("middlename", StringType(), True),
        StructField("lastname", StringType(), True),
        StructField("id", StringType(), True),
        StructField("gender", StringType(), True),
        StructField("salary", IntegerType(), True)
    ])
    df = ctx.spark_session.createDataFrame(data=data2, schema=schema)

    ####################################

    class AccumulatorNumpyArray(AccumulatorParam):
        def zero(self, zero: np.ndarray):
            return zero

        def addInPlace(self, v1, v2):
            return v1 + v2

    # from pyspark.context import SparkContext
    # sc = SparkContext.getOrCreate()
    sc = ctx.spark_session.sparkContext
    shape = 3

    global accum
    accum = sc.accumulator(
        np.zeros(shape, dtype=np.int64),
        AccumulatorNumpyArray(),
    )

    def func(row):
        global accum
        accum += np.ones(shape)
        return str(accum) + '_' + str(id(accum))

    user_defined_function = udf(func, StringType())
    new = df.withColumn("processed", user_defined_function(struct([df[col] for col in df.columns])))
    new.show(2)
    print(accum)
    return df
results in
+---------+----------+--------+-----+------+------+--------------------+
|firstname|middlename|lastname| id|gender|salary| processed|
+---------+----------+--------+-----+------+------+--------------------+
| James| | Smith|36636| M| 3000|[1. 1. 1.]_140388...|
| Michael| Rose| |40288| M| 4000|[2. 2. 2.]_140388...|
+---------+----------+--------+-----+------+------+--------------------+
only showing top 2 rows
and
> accum
Accumulator<id=0, value=[0 0 0]>
> id(accum)
140574405092256
If the Foundry boilerplate is removed, resulting in
import numpy as np
from pyspark.accumulators import AccumulatorParam
from pyspark.sql.functions import udf, struct
from pyspark.sql.types import StructType, StringType, IntegerType, StructField
from pyspark.sql import SparkSession
from pyspark.context import SparkContext
spark = (
    SparkSession.builder.appName("Python Spark SQL basic example")
    .config("spark.some.config.option", "some-value")
    .getOrCreate()
)
# ctx = spark.sparkContext.getOrCreate()

data2 = [
    ("James", "", "Smith", "36636", "M", 3000),
    ("Michael", "Rose", "", "40288", "M", 4000),
    ("Robert", "", "Williams", "42114", "M", 4000),
    ("Maria", "Anne", "Jones", "39192", "F", 4000),
    ("Jen", "Mary", "Brown", "", "F", -1),
]

schema = StructType(
    [
        StructField("firstname", StringType(), True),
        StructField("middlename", StringType(), True),
        StructField("lastname", StringType(), True),
        StructField("id", StringType(), True),
        StructField("gender", StringType(), True),
        StructField("salary", IntegerType(), True),
    ]
)

# df = ctx.spark_session.createDataFrame(data=data2, schema=schema)
df = spark.createDataFrame(data=data2, schema=schema)

####################################

class AccumulatorNumpyArray(AccumulatorParam):
    def zero(self, zero: np.ndarray):
        return zero

    def addInPlace(self, v1, v2):
        return v1 + v2

sc = SparkContext.getOrCreate()
shape = 3

global accum
accum = sc.accumulator(
    np.zeros(shape, dtype=np.int64),
    AccumulatorNumpyArray(),
)

def func(row):
    global accum
    accum += np.ones(shape)
    return str(accum) + "_" + str(id(accum))

user_defined_function = udf(func, StringType())

new = df.withColumn(
    "processed", user_defined_function(struct([df[col] for col in df.columns]))
)
new.show(2, False)
print(id(accum))
print(accum)
the output obtained in a regular Python environment with PySpark version 3.3.1 on Ubuntu meets expectations and is
+---------+----------+--------+-----+------+------+--------------------------+
|firstname|middlename|lastname|id |gender|salary|processed |
+---------+----------+--------+-----+------+------+--------------------------+
|James | |Smith |36636|M |3000 |[1. 1. 1.]_139642682452576|
|Michael |Rose | |40288|M |4000 |[1. 1. 1.]_139642682450224|
+---------+----------+--------+-----+------+------+--------------------------+
only showing top 2 rows
140166944013424
[3. 3. 3.]
The code outside of the transform is run in a different environment than the code within your transform. When you commit, the checks run the code outside the transform to generate the jobspec, which is technically your executable transform. You can find these within the "details" of your dataset after the checks pass.
The logic within your transform is then detached and runs in isolation each time you hit build. The global accum you define outside the transform is never run and doesn't exist when the code inside compute is running.
global accum  # <-- runs in checks

@transform_df(
    Output("ri.foundry.main.dataset.c0d4fc0c-bb1d-4c7b-86ce-a13ec6666490"),
)
def compute(ctx):
    # bla bla some logic  <-- runs during build
The prints in your second code example happen after the df is processed, because you are asking Spark to compute with new.show(2, False). The print in the first example happens before the df is processed, since the computation will only happen after your return df.
If you want to print after your df is computed, you can use @transform(... instead of @transform_df(... and do the print after writing the dataframe contents. It should be something like this:
@transform(
    output=Output("ri.foundry.main.dataset.c0d4fc0c-bb1d-4c7b-86ce-a13ec6666490"),
)
def compute(ctx, output):
    df = ...  # some logic ...
    output.write_dataframe(df)  # please check the function name; I think it was write_dataframe, but may be wrong
    print(accum)
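A side note for the plain-PySpark version (a hedged sketch, using the non-Foundry example above where the dataframe is named new): accumulator updates made inside a UDF only reach the driver after an action has evaluated the rows, and show(2) may evaluate only part of the data, so forcing a full pass before reading the value gives a more complete picture.
new.count()          # action over every row, so every UDF call has run
print(accum.value)   # reflects all evaluated rows; may over-count if partitions are recomputed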

how to print a "dictionary" of StringType() in the form of a table with pyspark

Hi,
The above column is part of a table that I am working with in Databricks. What I wish to do is turn the "ecommerce" column into a table of its own. In this case, that means I would have a new table with "detail", "products", etc. as columns. Currently "ecommerce" is a StringType.
I have tried using Spark dictionary creation, tabulate and other methods, but with no success.
The code that I have currently is
def ecommerce_wtchk_dlt():
    df = dlt.read_stream("wtchk_dlt")
    ddf = df.select(col("ecommerce"))
    header = ddf[0].keys()
    rows = [x.values() for x in ddf]
    dddf = tabulate.tabulate(rows, header)
    return dddf
Whenever I try to force the type of ecommerce to MapType, I get an error saying that since the original data source is StringType I can only use the same type.
I have reproduced the above and was able to achieve your requirement in this case by using from_json, json_tuple and explode.
This is my sample data with the same format as yours.
Code:
from pyspark.sql import functions as F
from pyspark.sql.types import *
df2 = df.select(F.json_tuple(df["ecommerce"], "detail")).toDF("detail") \
        .select(F.json_tuple(F.col("detail"), "products")).toDF("products")
print("products : ")
df2.show()
schema = ArrayType(StructType([
    StructField("name", StringType()),
    StructField("id", StringType()),
    StructField("price", StringType()),
    StructField("brand", StringType()),
    StructField("category", StringType()),
    StructField("variant", StringType())
]))
final_df = df2.withColumn("products", F.from_json("products", schema)) \
    .select(F.explode("products").alias("products")) \
    .select("products.*")
print("Final dataframe : ")
final_df.show()
My Result:
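As a hedged alternative sketch, the same result could be obtained in a single pass with from_json and a nested schema instead of two json_tuple calls; the field names (detail, products, ...) are taken from the answer above and are assumed to match the real payload.
from pyspark.sql import functions as F
from pyspark.sql.types import StructType, StructField, ArrayType, StringType

product = StructType([
    StructField("name", StringType()),
    StructField("id", StringType()),
    StructField("price", StringType()),
    StructField("brand", StringType()),
    StructField("category", StringType()),
    StructField("variant", StringType()),
])
ecommerce_schema = StructType([
    StructField("detail", StructType([
        StructField("products", ArrayType(product)),
    ])),
])

parsed = (df.withColumn("ecommerce", F.from_json("ecommerce", ecommerce_schema))
            .select(F.explode(F.col("ecommerce.detail.products")).alias("products"))
            .select("products.*"))
parsed.show()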

Error when returning an ArrayType of StructType from UDF (and using a single function in multiple UDFs)

(EDIT) Changed field names (from foo, bar, ... to name and city) because the old naming was confusing.
I need to use a single function in multiple UDFs and return different Structs depending on the input.
This simplified version of my implementation basically does what I am looking for:
from pyspark.sql.types import IntegerType, StructType, StringType
from pyspark.sql.functions import when, col, udf
df = spark.createDataFrame([1, 2, 3], IntegerType()).toDF('id')
struct_one = StructType().add('name', StringType(), True)
struct_not_one = StructType().add('city', StringType(), True)
def select(id):
    if id == 1:
        return {'name': 'Alice'}
    else:
        return {'city': 'Seattle'}
one_udf = udf(select, struct_one)
not_one_udf = udf(select, struct_not_one)
df = df.withColumn('one', when((col('id') == 1), one_udf(col('id'))))\
.withColumn('not_one', when((col('id') != 1), not_one_udf(col('id'))))
display(df)
(EDIT) Output:
id one not_one
1 {"name":"Alice"} null
2 null {"city":"Seattle"}
3 null {"city":"Seattle"}
But the same code returning an ArrayType of StructType unfortunately fails:
from pyspark.sql.types import IntegerType, StructType, StringType, ArrayType
from pyspark.sql.functions import when, col, udf
df = spark.createDataFrame([1, 2, 3], IntegerType()).toDF('id')
struct_one = StructType().add('name', StringType(), True)
struct_not_one = ArrayType(StructType().add('city', StringType(), True))
def select(id):
    if id == 1:
        return {'name': 'Alice'}
    else:
        return [{'city': 'Seattle'}, {'city': 'Milan'}]
one_udf = udf(select, struct_one)
not_one_udf = udf(select, struct_not_one)
df = df.withColumn('one', when((col('id') == 1), one_udf(col('id'))))\
.withColumn('not_one', when((col('id') != 1), not_one_udf(col('id'))))
display(df)
The error message is:
ValueError: Unexpected tuple 'name' with StructType
(EDIT) Desired Output would be:
id one not_one
1 {"name":"Alice"} null
2 null [{"city":"Seattle"},{"city":"Milan"}]
3 null [{"city":"Seattle"},{"city":"Milan"}]
Returning an ArrayType of other types (StringType, IntegerType, ...) works, though.
Also, returning an ArrayType of StructType works when not using a single function in multiple UDFs:
from pyspark.sql.types import IntegerType, StructType, StringType, ArrayType
from pyspark.sql.functions import when, col, udf
df = spark.createDataFrame([1, 2, 3], IntegerType()).toDF('id')
struct_not_one = ArrayType(StructType().add('city', StringType(), True))
def select(id):
    return [{'city': 'Seattle'}, {'city': 'Milan'}]
not_one_udf = udf(select, struct_not_one)
df = df.withColumn('not_one', when((col('id') != 1), not_one_udf(col('id'))))
display(df)
(EDIT) Output:
id not_one
1 null
2 [{"city":"Seattle"},{"city":"Milan"}]
3 [{"city":"Seattle"},{"city":"Milan"}]
Why does returning an ArrayType of StructType not work when using multiple UDFs with one single function?
Thanks!
"Spark SQL (including SQL and the DataFrame and Dataset API) does not guarantee the order of evaluation of subexpressions...
Therefore, it is dangerous to rely on the side effects or order of evaluation of Boolean expressions, and the order of WHERE and HAVING clauses, since such expressions and clauses can be reordered during query optimization and planning. Specifically, if a UDF relies on short-circuiting semantics in SQL for null checking, there’s no guarantee that the null check will happen before invoking the UDF."
See Evaluation order and null checking
To keep your udf generic you could push the 'when filter' into your udf:
from pyspark.sql.types import IntegerType, StructType, StringType, ArrayType
from pyspark.sql.functions import when, col, lit, udf
df = spark.createDataFrame([1, 2, 3], IntegerType()).toDF('id')
struct_one = StructType().add('name', StringType(), True)
struct_not_one = ArrayType(StructType().add('city', StringType(), True))
def select(id, test):
    if eval(test.format(id)) is False:
        return None
    if id == 1:
        return {'name': 'Alice'}
    else:
        return [{'city': 'Seattle'}, {'city': 'Milan'}]
one_udf = udf(select, struct_one)
not_one_udf = udf(select, struct_not_one)
df = df.withColumn('one', one_udf(col('id'), lit('{} == 1')))\
.withColumn('not_one', not_one_udf(col('id'), lit('{} != 1')))
display(df)
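An alternative hedged sketch, in case the eval on a formatted string feels fragile: pass the boolean test itself as a column, so the guard becomes an ordinary argument evaluated inside the UDF and the wrongly-typed branch is never returned (same toy data and structs as above).
from pyspark.sql.types import StructType, StringType, ArrayType
from pyspark.sql.functions import udf, col

struct_one = StructType().add('name', StringType(), True)
struct_not_one = ArrayType(StructType().add('city', StringType(), True))

def select(id, keep):
    if not keep:  # guard handled inside the UDF, so the return value always matches the declared type
        return None
    if id == 1:
        return {'name': 'Alice'}
    return [{'city': 'Seattle'}, {'city': 'Milan'}]

one_udf = udf(select, struct_one)
not_one_udf = udf(select, struct_not_one)

df = df.withColumn('one', one_udf(col('id'), col('id') == 1))\
       .withColumn('not_one', not_one_udf(col('id'), col('id') != 1))
display(df)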

Pyspark select from empty dataframe throws exception

The question is similar to this question, but it had no answer. I have a dataframe from which I am selecting data if it exists:
schema = StructType([
    StructField("file_name", StringType(), True),
    StructField("result", ArrayType(StructType()), True),
])
df = rdd.toDF(schema=schema)
print((df.count(), len(df.columns))) # 0,2
df.cache()
df = df.withColumn('result', F.explode(df['result']))
get_doc_id = F.udf(lambda line: ntpath.basename(line).replace('_all.txt', ''), StringType())
df = df.filter(df.result.isNotNull()).select(F.lit(job_id).alias('job_id'),
get_doc_id(df['file_name']).alias('doc_id'),
df['result._2'].alias('line_content'),
df['result._4'].alias('line1'),
df['result._3'].alias('line2'))
The above throws an error when the dataframe is empty:
pyspark.sql.utils.AnalysisException: 'No such struct field _2 in ;
Shouldn't it only execute if the result column had data? And how can I overcome this?
Spark executes code lazily, so it won't check whether you have data in your filter condition. Your code fails in the analysis stage because you don't have a field named result._2 in your data: you are passing an empty StructType in your schema for the result column. You should update it to something like this:
schema = StructType([
    StructField("file_name", StringType(), True),
    StructField("result", ArrayType(StructType([
        StructField("line_content", StringType(), True),
        StructField("line1", StringType(), True),
        StructField("line2", StringType(), True)
    ])), True)
])
df = spark.createDataFrame(sc.emptyRDD(),schema=schema)
df = df.withColumn('result', F.explode(df['result']))
get_doc_id = F.udf(lambda line: ntpath.basename(line).replace('_all.txt', ''), StringType())
df = df.filter(df.result.isNotNull()).select(F.lit('job_id').alias('job_id'),
get_doc_id(df['file_name']).alias('doc_id'),
df['result.line_content'].alias('line_content'),
df['result.line1'].alias('line1'),
df['result.line2'].alias('line2'))
The issue is that df does not have _2, so it ends up throwing errors like:
pyspark.sql.utils.AnalysisException: 'No such struct field _2 in ;
You can try checking whether the column exists with:
if '_2' not in result.columns:
    # Your code goes here
I would generally initialise the column with 0 or None if it does not exist, like:
from pyspark.sql.functions import lit

if '_2' not in result.columns:
    result = result.withColumn('_2', lit(0))
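A hedged follow-up sketch for this particular case: _2 is a field nested inside the result struct (after the explode) rather than a top-level column, so df.columns won't list it; the struct's own field names can be checked instead.
from pyspark.sql.types import StructType

result_type = df.schema["result"].dataType   # StructType once 'result' has been exploded
if isinstance(result_type, StructType) and "_2" in result_type.fieldNames():
    df = df.select(df["result._2"].alias("line_content"))
# otherwise skip the select, or add a default column with lit(...) as shown above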

Error when converting from pyspark RDD to DataFrame: Cannot infer schema of type 'unicode' [duplicate]

Could someone help me solve this problem I have with Spark DataFrame?
When I do myFloatRDD.toDF() I get an error:
TypeError: Can not infer schema for type: type 'float'
I don't understand why...
Example:
myFloatRdd = sc.parallelize([1.0,2.0,3.0])
df = myFloatRdd.toDF()
Thanks
SparkSession.createDataFrame, which is used under the hood, requires an RDD / list of Row/tuple/list/dict* or pandas.DataFrame, unless a schema with DataType is provided. Try to convert each float to a tuple like this:
myFloatRdd.map(lambda x: (x, )).toDF()
or even better:
from pyspark.sql import Row
row = Row("val") # Or some other column name
myFloatRdd.map(row).toDF()
To create a DataFrame from a list of scalars you'll have to use SparkSession.createDataFrame directly and provide a schema***:
from pyspark.sql.types import FloatType
df = spark.createDataFrame([1.0, 2.0, 3.0], FloatType())
df.show()
## +-----+
## |value|
## +-----+
## | 1.0|
## | 2.0|
## | 3.0|
## +-----+
but for a simple range it would be better to use SparkSession.range:
from pyspark.sql.functions import col
spark.range(1, 4).select(col("id").cast("double"))
* No longer supported.
** Spark SQL also provides limited support for schema inference on Python objects exposing __dict__.
*** Supported only in Spark 2.0 or later.
from pyspark.sql.types import IntegerType, Row
mylist = [1, 2, 3, 4, None ]
l = map(lambda x : Row(x), mylist)
# notice the parens after the type name
df=spark.createDataFrame(l,["id"])
df.where(df.id.isNull() == False).show()
Basically, you need to wrap each int in Row(); then the schema can be used.
Inferring the Schema Using Reflection
from pyspark.sql import Row
# spark - sparkSession
sc = spark.sparkContext
# Load a text file and convert each line to a Row.
orders = sc.textFile("/practicedata/orders")
#Split on delimiters
parts = orders.map(lambda l: l.split(","))
#Convert to Row
orders_struct = parts.map(lambda p: Row(order_id=int(p[0]), order_date=p[1], customer_id=p[2], order_status=p[3]))
for i in orders_struct.take(5): print(i)
#convert the RDD to DataFrame
orders_df = spark.createDataFrame(orders_struct)
Programmatically Specifying the Schema
from pyspark.sql import Row
from pyspark.sql.types import StructType, StructField, StringType
# spark - sparkSession
sc = spark.sparkContext
# Load a text file and convert each line to a Row.
orders = sc.textFile("/practicedata/orders")
#Split on delimiters
parts = orders.map(lambda l: l.split(","))
#Convert to tuple
orders_struct = parts.map(lambda p: (p[0], p[1], p[2], p[3].strip()))
#convert the RDD to DataFrame
orders_df = spark.createDataFrame(orders_struct)
# The schema is encoded in a string.
schemaString = "order_id order_date customer_id status"
fields = [StructField(field_name, StringType(), True) for field_name in schemaString.split()]
schema = StructType(fields)
ordersDf = spark.createDataFrame(orders_struct, schema)
from pyspark.sql import Row
myFloatRdd.map(lambda x: Row(x)).toDF()
