Spark DataFrame Filter function throwing Task not Serializable exception - apache-spark

I am trying to filter DataFrame/Dataset records using the filter function with a Scala anonymous function, but it throws a Task not serializable exception. Can someone please look into the code and explain what the mistake is?
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

val spark = SparkSession.builder()
  .appName("test data frame")
  .master("local[*]")
  .getOrCreate()

val user_seq = Seq(
  Row(1, "John", "London"),
  Row(1, "Martin", "New York"),
  Row(1, "Abhishek", "New York")
)

val user_schema = StructType(
  Array(
    StructField("user_id", IntegerType, true),
    StructField("user_name", StringType, true),
    StructField("user_city", StringType, true)
  ))

var user_df = spark.createDataFrame(spark.sparkContext.parallelize(user_seq), user_schema)

var user_rdd = user_df.filter((item) => {
  return item.getString(2) == "New York"
})

user_rdd.count();
I can see the below exception on the console. When I filter the data using a ColumnName, it works fine.
objc[48765]: Class JavaLaunchHelper is implemented in both /Library/Java/JavaVirtualMachines/jdk1.8.0_144.jdk/Contents/Home/bin/java (0x1059db4c0) and /Library/Java/JavaVirtualMachines/jdk1.8.0_144.jdk/Contents/Home/jre/lib/libinstrument.dylib (0x105a5f4e0). One of the two will be used. Which one is undefined.
Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
20/07/18 20:10:09 INFO SparkContext: Running Spark version 2.4.6
20/07/18 20:10:09 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
20/07/18 20:10:09 INFO SparkContext: Submitted application: test data frame
20/07/18 20:10:09 INFO SecurityManager: Changing view acls groups to:
20/07/18 20:10:09 INFO SecurityManager: Changing modify acls groups to:
20/07/18 20:10:12 INFO StateStoreCoordinatorRef: Registered StateStoreCoordinator endpoint
20/07/18 20:10:12 INFO ContextCleaner: Cleaned accumulator 0
20/07/18 20:10:13 INFO CodeGenerator: Code generated in 170.789451 ms
20/07/18 20:10:13 INFO CodeGenerator: Code generated in 17.729004 ms
Exception in thread "main" org.apache.spark.SparkException: Task not serializable
at org.apache.spark.util.ClosureCleaner$.ensureSerializable(ClosureCleaner.scala:416)
at org.apache.spark.util.ClosureCleaner$.org$apache$spark$util$ClosureCleaner$$clean(ClosureCleaner.scala:406)
at org.apache.spark.util.ClosureCleaner$.clean(ClosureCleaner.scala:163)
at org.apache.spark.SparkContext.clean(SparkContext.scala:2326)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsWithIndex$1.apply(RDD.scala:872)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsWithIndex$1.apply(RDD.scala:871)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
at org.apache.spark.rdd.RDD.withScope(RDD.scala:385)
at org.apache.spark.rdd.RDD.mapPartitionsWithIndex(RDD.scala:871)
at org.apache.spark.sql.execution.WholeStageCodegenExec.doExecute(WholeStageCodegenExec.scala:630)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:131)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:127)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:155)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:152)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:127)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.prepareShuffleDependency(ShuffleExchangeExec.scala:92)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec$$anonfun$doExecute$1.apply(ShuffleExchangeExec.scala:128)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec$$anonfun$doExecute$1.apply(ShuffleExchangeExec.scala:119)
at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:52)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.doExecute(ShuffleExchangeExec.scala:119)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:131)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:127)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:155)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:152)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:127)
at org.apache.spark.sql.execution.InputAdapter.inputRDDs(WholeStageCodegenExec.scala:391)
at org.apache.spark.sql.execution.aggregate.HashAggregateExec.inputRDDs(HashAggregateExec.scala:151)
at org.apache.spark.sql.execution.WholeStageCodegenExec.doExecute(WholeStageCodegenExec.scala:627)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:131)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:127)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:155)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:152)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:127)
at org.apache.spark.sql.execution.SparkPlan.getByteArrayRdd(SparkPlan.scala:247)
at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:296)
at org.apache.spark.sql.Dataset$$anonfun$count$1.apply(Dataset.scala:2836)
at org.apache.spark.sql.Dataset$$anonfun$count$1.apply(Dataset.scala:2835)
at org.apache.spark.sql.Dataset$$anonfun$52.apply(Dataset.scala:3370)
at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:80)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:127)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:75)
at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3369)
at org.apache.spark.sql.Dataset.count(Dataset.scala:2835)
at DataFrameTest$.main(DataFrameTest.scala:65)
at DataFrameTest.main(DataFrameTest.scala)
Caused by: java.io.NotSerializableException: java.lang.Object
Serialization stack:
- object not serializable (class: java.lang.Object, value: java.lang.Object#cec590c)
- field (class: DataFrameTest$$anonfun$1, name: nonLocalReturnKey1$1, type: class java.lang.Object)
- object (class DataFrameTest$$anonfun$1, <function1>)
- element of array (index: 1)
- array (class [Ljava.lang.Object;, size 5)
- field (class: org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$13, name: references$1, type: class [Ljava.lang.Object;)
- object (class org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$13, <function2>)
at org.apache.spark.serializer.SerializationDebugger$.improveException(SerializationDebugger.scala:40)
at org.apache.spark.serializer.JavaSerializationStream.writeObject(JavaSerializer.scala:46)
at org.apache.spark.serializer.JavaSerializerInstance.serialize(JavaSerializer.scala:100)
at org.apache.spark.util.ClosureCleaner$.ensureSerializable(ClosureCleaner.scala:413)
... 48 more
20/07/18 20:10:13 INFO SparkContext: Invoking stop() from shutdown hook
20/07/18 20:10:13 INFO SparkUI: Stopped Spark web UI at http://192.168.31.239:4040
20/07/18 20:10:13 INFO MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped!
20/07/18 20:10:13 INFO MemoryStore: MemoryStore cleared
20/07/18 20:10:13 INFO BlockManager: BlockManager stopped
20/07/18 20:10:13 INFO BlockManagerMaster: BlockManagerMaster stopped
20/07/18 20:10:13 INFO OutputCommitCoordinator$OutputCommitCoordinatorEndpoint: OutputCommitCoordinator stopped!
20/07/18 20:10:13 INFO SparkContext: Successfully stopped SparkContext
20/07/18 20:10:13 INFO ShutdownHookManager: Shutdown hook called
20/07/18 20:10:13 INFO ShutdownHookManager: Deleting directory /private/var/folders/33/3n6vtfs54mdb7x6882fyqy4mccfmvg/T/spark-3e071448-7ad7-47b8-bf70-68ab74721aa2
Process finished with exit code 1

Remove the return keyword from the filter lambda. A return inside a Scala anonymous function is a non-local return: the compiler captures a plain java.lang.Object as the return key inside the closure, and that object is exactly the non-serializable value reported in the serialization stack (the nonLocalReturnKey1$1 field). Change this code:
var user_rdd = user_df.filter((item) => {
  return item.getString(2) == "New York"
})
to this:
var user_rdd = user_df.filter(_.getString(2) == "New York")
or:
user_df.filter($"user_city" === "New York").count
You can also refactor your code like below:
import spark.implicits._

val df = Seq((1, "John", "London"), (1, "Martin", "New York"), (1, "Abhishek", "New York"))
  .toDF("user_id", "user_name", "user_city")
df.filter($"user_city" === "New York").count

Related

cannot assign instance of scala.collection.immutable.List$SerializationProxy to field org.apache.spark.sql.execution.datasources.v2.DataSourceRDD

I am trying to submit a PySpark job to a Kubernetes Spark cluster using Airflow. In that Spark job I use the writeStream foreachBatch function to write streaming data, and irrespective of the sink type I face this issue only when I try to write the data:
Inside the Spark cluster:
Spark 3.3.0
PySpark 3.3
Scala 2.12.15
OpenJDK 64-Bit Server VM, 11.0.15
Inside Airflow:
Spark 3.1.2
PySpark 3.1.2
Scala 2.12.10
OpenJDK 64-Bit Server VM, 1.8.0
Dependencies: org.scala-lang:scala-library:2.12.8, org.apache.spark:spark-sql-kafka-0-10_2.12:3.3.0, org.apache.spark:spark-sql_2.12:3.3.0, org.apache.spark:spark-core_2.12:3.3.0, org.postgresql:postgresql:42.3.3
The DAG which I am using to submit it is:
import airflow
from datetime import timedelta
from airflow import DAG
from time import sleep
from datetime import datetime
from airflow.providers.apache.spark.operators.spark_submit import SparkSubmitOperator

dag = DAG(dag_id='testpostgres.py', schedule_interval=None, start_date=datetime(2022, 1, 1), catchup=False)

spark_job = SparkSubmitOperator(
    application='/usr/local/airflow/data/testpostgres.py',
    conn_id='spark_kcluster',
    task_id='spark_job_test',
    dag=dag,
    packages="org.scala-lang:scala-library:2.12.8,org.apache.spark:spark-sql-kafka-0-10_2.12:3.3.0,org.apache.spark:spark-sql_2.12:3.3.0,org.apache.spark:spark-core_2.12:3.3.0,org.postgresql:postgresql:42.3.3",
    conf={
        'deploy-mode': 'cluster',
        'executor_cores': 1,
        'EXECUTORS_MEM': '2G',
        'name': 'spark-py',
        'spark.kubernetes.namespace': 'sandbox',
        'spark.kubernetes.file.upload.path': '/usr/local/airflow/data',
        'spark.kubernetes.container.image': '**********',
        'spark.kubernetes.container.image.pullPolicy': 'IfNotPresent',
        'spark.kubernetes.authenticate.driver.serviceAccountName': 'spark',
        'spark.kubernetes.driver.volumes.persistentVolumeClaim.rwopvc.options.claimName': 'data-pvc',
        'spark.kubernetes.driver.volumes.persistentVolumeClaim.rwopvc.mount.path': '/usr/local/airflow/data',
        'spark.driver.extraJavaOptions': '-Divy.cache.dir=/tmp -Divy.home=/tmp'
    }
)
This is the job I am trying to submit:
from pyspark.sql.functions import *
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
from pyspark.sql.functions import dayofweek
from pyspark.sql.functions import date_format
from pyspark.sql.functions import hour
from functools import reduce
from pyspark.sql.types import DoubleType, StringType, ArrayType
import pandas as pd
import json
spark = SparkSession.builder.appName('spark').getOrCreate()
kafka_topic_name = '****'
kafka_bootstrap_servers = '*********' + ':' + '*****'
streaming_dataframe = spark.readStream.format("kafka").option("kafka.bootstrap.servers", kafka_bootstrap_servers).option("subscribe", kafka_topic_name).option("startingOffsets", "earliest").load()
streaming_dataframe = streaming_dataframe.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
dataframe_schema = '******'
streaming_dataframe = streaming_dataframe.select(from_csv(col("value"), dataframe_schema).alias("pipeline")).select("pipeline.*")
tumblingWindows = streaming_dataframe.withWatermark("timeStamp", "48 hour").groupBy(window("timeStamp", "24 hour", "1 hour"), "phoneNumber").agg((F.first(F.col("duration")).alias("firstDuration")))
tumblingWindows = tumblingWindows.withColumn("start_window", F.col('window')['start'])
tumblingWindows = tumblingWindows.withColumn("end_window", F.col('window')['end'])
tumblingWindows = tumblingWindows.drop('window')
def postgres_write(tumblingWindows, epoch_id):
    tumblingWindows.write.jdbc(url=db_target_url, table=table_postgres, mode='append', properties=db_target_properties)
    pass
db_target_url = 'jdbc:postgresql://' + '*******'+ ':' + '****' + '/' + 'test'
table_postgres = '******'
db_target_properties = {
    'user': 'postgres',
    'password': 'postgres',
    'driver': 'org.postgresql.Driver'
}
query = tumblingWindows.writeStream.foreachBatch(postgres_write).start().awaitTermination()
Error logs:
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2672)
at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2608)
at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2607)
at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2607)
at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1182)
at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1182)
at scala.Option.foreach(Option.scala:407)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1182)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2860)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2802)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2791)
at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:952)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2228)
at org.apache.spark.sql.execution.datasources.v2.V2TableWriteExec.writeWithV2(WriteToDataSourceV2Exec.scala:377)
... 42 more
Caused by: java.lang.ClassCastException: cannot assign instance of scala.collection.immutable.List$SerializationProxy to field org.apache.spark.sql.execution.datasources.v2.DataSourceRDDPartition.inputPartitions of type scala.collection.Seq in instance of org.apache.spark.sql.execution.datasources.v2.DataSourceRDDPartition
at java.base/java.io.ObjectStreamClass$FieldReflector.setObjFieldValues(Unknown Source)
at java.base/java.io.ObjectStreamClass$FieldReflector.checkObjectFieldValueTypes(Unknown Source)
at java.base/java.io.ObjectStreamClass.checkObjFieldValueTypes(Unknown Source)
at java.base/java.io.ObjectInputStream.defaultCheckFieldValues(Unknown Source)
at java.base/java.io.ObjectInputStream.readSerialData(Unknown Source)
at java.base/java.io.ObjectInputStream.readOrdinaryObject(Unknown Source)
at java.base/java.io.ObjectInputStream.readObject0(Unknown Source)
at java.base/java.io.ObjectInputStream.defaultReadFields(Unknown Source)
at java.base/java.io.ObjectInputStream.readSerialData(Unknown Source)
at java.base/java.io.ObjectInputStream.readOrdinaryObject(Unknown Source)
at java.base/java.io.ObjectInputStream.readObject0(Unknown Source)
at java.base/java.io.ObjectInputStream.readObject(Unknown Source)
at java.base/java.io.ObjectInputStream.readObject(Unknown Source)
at org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:87)
at org.apache.spark.serializer.JavaSerializerInstance.deserialize(JavaSerializer.scala:129)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:507)
at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source)
at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source)
at java.base/java.lang.Thread.run(Unknown Source)
Traceback (most recent call last):
File "/usr/local/airflow/data/spark-upload-d03175bc-8c50-4baf-8383-a203182f16c0/debug.py", line 20, in <module>
streaming_dataframe.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")\
File "/opt/spark/python/lib/pyspark.zip/pyspark/sql/streaming.py", line 107, in awaitTermination
File "/opt/spark/python/lib/py4j-0.10.9.5-src.zip/py4j/java_gateway.py", line 1321, in __call__
File "/opt/spark/python/lib/pyspark.zip/pyspark/sql/utils.py", line 196, in deco
pyspark.sql.utils.StreamingQueryException: Query [id = d0e140c1-830d-49c8-88b7-90b82d301408, runId = c0f38f58-6571-4fda-b3e0-98e4ffaf8c7a] terminated with exception: Writing job aborted
22/08/24 10:12:53 INFO SparkUI: Stopped Spark web UI at ************************
22/08/24 10:12:53 INFO KubernetesClusterSchedulerBackend: Shutting down all executors
22/08/24 10:12:53 INFO KubernetesClusterSchedulerBackend$KubernetesDriverEndpoint: Asking each executor to shut down
22/08/24 10:12:53 WARN ExecutorPodsWatchSnapshotSource: Kubernetes client has been closed.
22/08/24 10:12:53 INFO MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped!
22/08/24 10:12:53 INFO MemoryStore: MemoryStore cleared
22/08/24 10:12:53 INFO BlockManager: BlockManager stopped
22/08/24 10:12:53 INFO BlockManagerMaster: BlockManagerMaster stopped
22/08/24 10:12:53 INFO OutputCommitCoordinator$OutputCommitCoordinatorEndpoint: OutputCommitCoordinator stopped!
22/08/24 10:12:54 INFO SparkContext: Successfully stopped SparkContext
22/08/24 10:12:54 INFO ShutdownHookManager: Shutdown hook called
22/08/24 10:12:54 INFO ShutdownHookManager: Deleting directory /var/data/spark-32ef85e0-e85c-4ac6-a46d-d3379ca58468/spark-adecf44a-dc60-4a85-bbe3-bc125f5cc39f/pyspark-f3ffaa5e-a490-464a-98d2-fbce223628eb
22/08/24 10:12:54 INFO ShutdownHookManager: Deleting directory /var/data/spark-32ef85e0-e85c-4ac6-a46d-d3379ca58468/spark-adecf44a-dc60-4a85-bbe3-bc125f5cc39f
22/08/24 10:12:54 INFO ShutdownHookManager: Deleting directory /tmp/spark-5acdd5e6-7f6e-45ec-adae-e98862e1537c
I faced this issue recently. I think it occurs when shuffling data coming from Kafka.
I fixed it by loading all of the dependencies (jars) of org.apache.spark:spark-sql-kafka-0-10_2.12:3.3.0 into the project. You can find them here.
For now, I don't know which ones are sufficient.

Apache Beam - Error with example using Spark Runner pointing to a local spark master URL

I need to support a use case where we are able to run Beam pipelines against an external Spark master URL.
I took a basic example of a Beam pipeline, shown below:
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions

class ConvertToByteArray(beam.DoFn):
    def __init__(self):
        pass

    def setup(self):
        pass

    def process(self, row):
        try:
            yield bytearray(row + '\n', 'utf-8')
        except Exception as e:
            raise e

def run():
    options = PipelineOptions([
        "--runner=SparkRunner",
        # "--spark_master_url=spark://0.0.0.0:7077",
        # "--spark_version=3",
    ])
    with beam.Pipeline(options=options) as p:
        lines = (p
                 | 'Create words' >> beam.Create(['this is working'])
                 | 'Split words' >> beam.FlatMap(lambda words: words.split(' '))
                 | 'Build byte array' >> beam.ParDo(ConvertToByteArray())
                 | 'Group' >> beam.GroupBy()  # Do future batching here
                 | 'print output' >> beam.Map(print)
                 )

if __name__ == "__main__":
    run()
I try to run this pipeline in two ways:
Using Apache Beam's internal Spark runner
Running Spark locally and passing the Spark master URL
Approach 1 works fine and I'm able to see the output (screenshot below).
Screenshot of Output
Approach 2 gives a class-incompatible error on the Spark server.
Running Spark as a Docker container and natively on my machine both gave the same error.
Exception trace
Spark Executor Command: "/opt/bitnami/java/bin/java" "-cp" "/opt/bitnami/spark/conf/:/opt/bitnami/spark/jars/*" "-Xmx1024M" "-Dspark.driver.port=62420" "org.apache.spark.executor.CoarseGrainedExecutorBackend" "--driver-url" "spark://CoarseGrainedScheduler#192.168.8.120:62420" "--executor-id" "0" "--hostname" "172.17.0.2" "--cores" "4" "--app-id" "app-20220425143553-0000" "--worker-url" "spark://Worker#172.17.0.2:44575"
========================================
Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
22/04/25 14:35:55 INFO CoarseGrainedExecutorBackend: Started daemon with process name: 165#ace075bc56c0
22/04/25 14:35:55 INFO SignalUtils: Registering signal handler for TERM
22/04/25 14:35:55 INFO SignalUtils: Registering signal handler for HUP
22/04/25 14:35:55 INFO SignalUtils: Registering signal handler for INT
22/04/25 14:35:56 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
22/04/25 14:35:56 INFO SecurityManager: Changing view acls to: spark,nikamath
22/04/25 14:35:56 INFO SecurityManager: Changing modify acls to: spark,nikamath
22/04/25 14:35:56 INFO SecurityManager: Changing view acls groups to:
22/04/25 14:35:56 INFO SecurityManager: Changing modify acls groups to:
22/04/25 14:35:56 INFO SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(spark, nikamath); groups with view permissions: Set(); users with modify permissions: Set(spark, nikamath); groups with modify permissions: Set()
22/04/25 14:35:57 INFO TransportClientFactory: Successfully created connection to /192.168.8.120:62420 after 194 ms (0 ms spent in bootstraps)
22/04/25 14:35:57 INFO SecurityManager: Changing view acls to: spark,nikamath
22/04/25 14:35:57 INFO SecurityManager: Changing modify acls to: spark,nikamath
22/04/25 14:35:57 INFO SecurityManager: Changing view acls groups to:
22/04/25 14:35:57 INFO SecurityManager: Changing modify acls groups to:
22/04/25 14:35:57 INFO SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(spark, nikamath); groups with view permissions: Set(); users with modify permissions: Set(spark, nikamath); groups with modify permissions: Set()
22/04/25 14:35:57 INFO TransportClientFactory: Successfully created connection to /192.168.8.120:62420 after 19 ms (0 ms spent in bootstraps)
22/04/25 14:35:58 INFO DiskBlockManager: Created local directory at /tmp/spark-41a0e50e-81aa-48b7-ae55-888ae3a0a4ca/executor-a5ff3b34-0166-405f-9c00-d5a5ee6f8688/blockmgr-d7bd5b95-ddb3-4642-9863-261a6e109fc4
22/04/25 14:35:58 INFO MemoryStore: MemoryStore started with capacity 366.3 MiB
22/04/25 14:35:58 INFO CoarseGrainedExecutorBackend: Connecting to driver: spark://CoarseGrainedScheduler#192.168.8.120:62420
22/04/25 14:35:58 INFO WorkerWatcher: Connecting to worker spark://Worker#172.17.0.2:44575
22/04/25 14:35:58 INFO TransportClientFactory: Successfully created connection to /172.17.0.2:44575 after 7 ms (0 ms spent in bootstraps)
22/04/25 14:35:58 INFO WorkerWatcher: Successfully connected to spark://Worker#172.17.0.2:44575
22/04/25 14:35:58 INFO ResourceUtils: ==============================================================
22/04/25 14:35:58 INFO ResourceUtils: No custom resources configured for spark.executor.
22/04/25 14:35:58 INFO ResourceUtils: ==============================================================
22/04/25 14:35:58 INFO CoarseGrainedExecutorBackend: Successfully registered with driver
22/04/25 14:35:58 INFO Executor: Starting executor ID 0 on host 172.17.0.2
22/04/25 14:35:59 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 39027.
22/04/25 14:35:59 INFO NettyBlockTransferService: Server created on 172.17.0.2:39027
22/04/25 14:35:59 INFO BlockManager: Using org.apache.spark.storage.RandomBlockReplicationPolicy for block replication policy
22/04/25 14:35:59 INFO BlockManagerMaster: Registering BlockManager BlockManagerId(0, 172.17.0.2, 39027, None)
22/04/25 14:35:59 INFO BlockManagerMaster: Registered BlockManager BlockManagerId(0, 172.17.0.2, 39027, None)
22/04/25 14:35:59 INFO BlockManager: Initialized BlockManager: BlockManagerId(0, 172.17.0.2, 39027, None)
22/04/25 14:35:59 INFO Executor: Fetching spark://192.168.8.120:62420/jars/beam-runners-spark-3-job-server-2.33.0.jar with timestamp 1650897351765
22/04/25 14:35:59 INFO TransportClientFactory: Successfully created connection to /192.168.8.120:62420 after 5 ms (0 ms spent in bootstraps)
22/04/25 14:35:59 INFO Utils: Fetching spark://192.168.8.120:62420/jars/beam-runners-spark-3-job-server-2.33.0.jar to /tmp/spark-41a0e50e-81aa-48b7-ae55-888ae3a0a4ca/executor-a5ff3b34-0166-405f-9c00-d5a5ee6f8688/spark-cd481993-e8df-46fd-b00c-9a31e17d245d/fetchFileTemp1955801007454794690.tmp
22/04/25 14:36:02 INFO Utils: Copying /tmp/spark-41a0e50e-81aa-48b7-ae55-888ae3a0a4ca/executor-a5ff3b34-0166-405f-9c00-d5a5ee6f8688/spark-cd481993-e8df-46fd-b00c-9a31e17d245d/-16622853561650897351765_cache to /opt/bitnami/spark/work/app-20220425143553-0000/0/./beam-runners-spark-3-job-server-2.33.0.jar
22/04/25 14:36:04 INFO Executor: Adding file:/opt/bitnami/spark/work/app-20220425143553-0000/0/./beam-runners-spark-3-job-server-2.33.0.jar to class loader
22/04/25 14:36:04 INFO CoarseGrainedExecutorBackend: Got assigned task 0
22/04/25 14:36:04 INFO CoarseGrainedExecutorBackend: Got assigned task 1
22/04/25 14:36:04 INFO Executor: Running task 1.0 in stage 0.0 (TID 1)
22/04/25 14:36:04 INFO Executor: Running task 0.0 in stage 0.0 (TID 0)
22/04/25 14:36:04 ERROR Executor: Exception in task 0.0 in stage 0.0 (TID 0)
java.io.InvalidClassException: org.apache.spark.util.AccumulatorV2; local class incompatible: stream classdesc serialVersionUID = 8273715124741334009, local class serialVersionUID = 574976528730727648
at java.io.ObjectStreamClass.initNonProxy(ObjectStreamClass.java:699)
at java.io.ObjectInputStream.readNonProxyDesc(ObjectInputStream.java:2005)
at java.io.ObjectInputStream.readClassDesc(ObjectInputStream.java:1852)
at java.io.ObjectInputStream.readNonProxyDesc(ObjectInputStream.java:2005)
at java.io.ObjectInputStream.readClassDesc(ObjectInputStream.java:1852)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2186)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1669)
at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2431)
at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2355)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2213)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1669)
at java.io.ObjectInputStream.readObject(ObjectInputStream.java:503)
at java.io.ObjectInputStream.readObject(ObjectInputStream.java:461)
at org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:76)
at org.apache.spark.serializer.JavaSerializerInstance.deserialize(JavaSerializer.scala:109)
at org.apache.spark.scheduler.Task.metrics$lzycompute(Task.scala:72)
at org.apache.spark.scheduler.Task.metrics(Task.scala:71)
at org.apache.spark.scheduler.Task.run(Task.scala:100)
at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:506)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1462)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:509)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:750)
22/04/25 14:36:04 ERROR Executor: Exception in task 1.0 in stage 0.0 (TID 1)
java.io.InvalidClassException: org.apache.spark.util.AccumulatorV2; local class incompatible: stream classdesc serialVersionUID = 8273715124741334009, local class serialVersionUID = 574976528730727648
at java.io.ObjectStreamClass.initNonProxy(ObjectStreamClass.java:699)
at java.io.ObjectInputStream.readNonProxyDesc(ObjectInputStream.java:2005)
at java.io.ObjectInputStream.readClassDesc(ObjectInputStream.java:1852)
at java.io.ObjectInputStream.readNonProxyDesc(ObjectInputStream.java:2005)
at java.io.ObjectInputStream.readClassDesc(ObjectInputStream.java:1852)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2186)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1669)
at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2431)
at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2355)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2213)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1669)
at java.io.ObjectInputStream.readObject(ObjectInputStream.java:503)
at java.io.ObjectInputStream.readObject(ObjectInputStream.java:461)
at org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:76)
at org.apache.spark.serializer.JavaSerializerInstance.deserialize(JavaSerializer.scala:109)
at org.apache.spark.scheduler.Task.metrics$lzycompute(Task.scala:72)
at org.apache.spark.scheduler.Task.metrics(Task.scala:71)
at org.apache.spark.scheduler.Task.run(Task.scala:100)
at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:506)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1462)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:509)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:750)
22/04/25 14:36:04 ERROR SparkUncaughtExceptionHandler: Uncaught exception in thread Thread[Executor task launch worker for task 0.0 in stage 0.0 (TID 0),5,main]
java.lang.Error: java.io.InvalidClassException: org.apache.spark.util.AccumulatorV2; local class incompatible: stream classdesc serialVersionUID = 8273715124741334009, local class serialVersionUID = 574976528730727648
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1155)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:750)
Caused by: java.io.InvalidClassException: org.apache.spark.util.AccumulatorV2; local class incompatible: stream classdesc serialVersionUID = 8273715124741334009, local class serialVersionUID = 574976528730727648
at java.io.ObjectStreamClass.initNonProxy(ObjectStreamClass.java:699)
at java.io.ObjectInputStream.readNonProxyDesc(ObjectInputStream.java:2005)
at java.io.ObjectInputStream.readClassDesc(ObjectInputStream.java:1852)
at java.io.ObjectInputStream.readNonProxyDesc(ObjectInputStream.java:2005)
at java.io.ObjectInputStream.readClassDesc(ObjectInputStream.java:1852)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2186)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1669)
at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2431)
at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2355)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2213)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1669)
at java.io.ObjectInputStream.readObject(ObjectInputStream.java:503)
at java.io.ObjectInputStream.readObject(ObjectInputStream.java:461)
at org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:76)
at org.apache.spark.serializer.JavaSerializerInstance.deserialize(JavaSerializer.scala:109)
at org.apache.spark.scheduler.Task.metrics$lzycompute(Task.scala:72)
at org.apache.spark.scheduler.Task.metrics(Task.scala:71)
at org.apache.spark.executor.Executor$TaskRunner.$anonfun$collectAccumulatorsAndResetStatusOnFailure$1(Executor.scala:424)
at org.apache.spark.executor.Executor$TaskRunner.$anonfun$collectAccumulatorsAndResetStatusOnFailure$1$adapted(Executor.scala:423)
at scala.Option.foreach(Option.scala:407)
at org.apache.spark.executor.Executor$TaskRunner.collectAccumulatorsAndResetStatusOnFailure(Executor.scala:423)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:704)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
... 2 more
22/04/25 14:36:04 ERROR SparkUncaughtExceptionHandler: Uncaught exception in thread Thread[Executor task launch worker for task 1.0 in stage 0.0 (TID 1),5,main]
java.lang.Error: java.io.InvalidClassException: org.apache.spark.util.AccumulatorV2; local class incompatible: stream classdesc serialVersionUID = 8273715124741334009, local class serialVersionUID = 574976528730727648
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1155)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:750)
Caused by: java.io.InvalidClassException: org.apache.spark.util.AccumulatorV2; local class incompatible: stream classdesc serialVersionUID = 8273715124741334009, local class serialVersionUID = 574976528730727648
at java.io.ObjectStreamClass.initNonProxy(ObjectStreamClass.java:699)
at java.io.ObjectInputStream.readNonProxyDesc(ObjectInputStream.java:2005)
at java.io.ObjectInputStream.readClassDesc(ObjectInputStream.java:1852)
at java.io.ObjectInputStream.readNonProxyDesc(ObjectInputStream.java:2005)
at java.io.ObjectInputStream.readClassDesc(ObjectInputStream.java:1852)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2186)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1669)
at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2431)
at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2355)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2213)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1669)
at java.io.ObjectInputStream.readObject(ObjectInputStream.java:503)
at java.io.ObjectInputStream.readObject(ObjectInputStream.java:461)
at org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:76)
at org.apache.spark.serializer.JavaSerializerInstance.deserialize(JavaSerializer.scala:109)
at org.apache.spark.scheduler.Task.metrics$lzycompute(Task.scala:72)
at org.apache.spark.scheduler.Task.metrics(Task.scala:71)
at org.apache.spark.executor.Executor$TaskRunner.$anonfun$collectAccumulatorsAndResetStatusOnFailure$1(Executor.scala:424)
at org.apache.spark.executor.Executor$TaskRunner.$anonfun$collectAccumulatorsAndResetStatusOnFailure$1$adapted(Executor.scala:423)
at scala.Option.foreach(Option.scala:407)
at org.apache.spark.executor.Executor$TaskRunner.collectAccumulatorsAndResetStatusOnFailure(Executor.scala:423)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:704)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
... 2 more
22/04/25 14:36:05 INFO MemoryStore: MemoryStore cleared
22/04/25 14:36:05 INFO BlockManager: BlockManager stopped
22/04/25 14:36:05 INFO ShutdownHookManager: Shutdown hook called
22/04/25 14:36:05 INFO ShutdownHookManager: Deleting directory /tmp/spark-41a0e50e-81aa-48b7-ae55-888ae3a0a4ca/executor-a5ff3b34-0166-405f-9c00-d5a5ee6f8688/spark-cd481993-e8df-46fd-b00c-9a31e17d245d
I confirmed that a worker was running and that Spark was set up properly.
I submitted a sample Spark job to this master and got it to work, implying that there isn't anything wrong with the Spark master or worker.
Versions used:
Spark 3.2.1
Apache Beam 2.37.0
Python 3.7

Spark fails to load data frame from elasticsearch due to field format exception

My DataFrame fails due to a NumberFormatException on one of the nested JSON fields when reading from Elasticsearch.
I am not providing any schema as it should be inferred automatically from Elastic.
package org.arc
import org.apache.spark._
import org.apache.spark.SparkContext._
import org.apache.log4j._
import scala.io.Source
import java.nio.charset.CodingErrorAction
import scala.io.Codec
import org.apache.spark.storage.StorageLevel
import org.apache.spark._
import org.apache.spark.sql.SparkSession
import org.apache.spark.util.Utils
import org.apache.spark.sql.Dataset
import org.apache.spark.sql.expressions
import org.apache.spark.sql.functions.{ concat, lit }
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.sql.Row
import org.apache.spark.sql.functions.udf
import org.apache.spark.sql.types._
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.types.{ StructType, StructField, StringType };
import org.apache.spark.serializer.KryoSerializer
object SparkOnES {
  def main(args: Array[String]) {
    val spark = SparkSession
      .builder()
      .appName("SparkESTest")
      .config("spark.master", "local[*]")
      .config("spark.sql.warehouse.dir", "C://SparkScala//SparkLocal//spark-warehouse")
      .enableHiveSupport()
      .getOrCreate()

    //1. Read sample JSON
    import spark.implicits._
    //val myjson = spark.read.json("C:\\Users\\jasjyotsinghj599\\Desktop\\SampleTest.json")
    //myjson.show(false)

    //2. Read data from ES
    val esdf = spark.read.format("org.elasticsearch.spark.sql")
      .option("es.nodes", "XXXXXX")
      .option("es.port", "80")
      .option("es.query", "?q=*")
      .option("es.nodes.wan.only", "true")
      .option("pushdown", "true")
      .option("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .load("batch_index/ticket")

    esdf.createOrReplaceTempView("esdf")
    spark.sql("Select * from esdf limit 1").show(false)
    val esdf_fltr_lt = esdf.take(1)
  }
}
The error stack says that it cannot parse the input field. Looking at the exception, the issue seems to be caused by a mismatch between the type of data expected (int, float, double) and the type received (string):
Caused by: java.lang.NumberFormatException: For input string: "161.60"
at java.lang.NumberFormatException.forInputString(NumberFormatException.java:65)
at java.lang.Long.parseLong(Long.java:589)
at java.lang.Long.parseLong(Long.java:631)
at scala.collection.immutable.StringLike$class.toLong(StringLike.scala:277)
at scala.collection.immutable.StringOps.toLong(StringOps.scala:29)
at org.elasticsearch.spark.serialization.ScalaValueReader.parseLong(ScalaValueReader.scala:142)
at org.elasticsearch.spark.serialization.ScalaValueReader$$anonfun$longValue$1.apply(ScalaValueReader.scala:141)
at org.elasticsearch.spark.serialization.ScalaValueReader$$anonfun$longValue$1.apply(ScalaValueReader.scala:141)
at org.elasticsearch.spark.serialization.ScalaValueReader.checkNull(ScalaValueReader.scala:120)
at org.elasticsearch.spark.serialization.ScalaValueReader.longValue(ScalaValueReader.scala:141)
at org.elasticsearch.spark.serialization.ScalaValueReader.readValue(ScalaValueReader.scala:89)
at org.elasticsearch.spark.sql.ScalaRowValueReader.readValue(ScalaEsRowValueReader.scala:46)
at org.elasticsearch.hadoop.serialization.ScrollReader.parseValue(ScrollReader.java:770)
at org.elasticsearch.hadoop.serialization.ScrollReader.read(ScrollReader.java:720)
... 25 more
18/04/25 23:33:53 WARN TaskSetManager: Lost task 3.0 in stage 1.0 (TID 4, localhost): org.elasticsearch.hadoop.rest.EsHadoopParsingException: Cannot parse value [161.60] for field [tvl_tkt_tot_chrg_amt]
at org.elasticsearch.hadoop.serialization.ScrollReader.read(ScrollReader.java:723)
at org.elasticsearch.hadoop.serialization.ScrollReader.map(ScrollReader.java:867)
at org.elasticsearch.hadoop.serialization.ScrollReader.read(ScrollReader.java:710)
at org.elasticsearch.hadoop.serialization.ScrollReader.readHitAsMap(ScrollReader.java:476)
at org.elasticsearch.hadoop.serialization.ScrollReader.readHit(ScrollReader.java:401)
at org.elasticsearch.hadoop.serialization.ScrollReader.read(ScrollReader.java:296)
at org.elasticsearch.hadoop.serialization.ScrollReader.read(ScrollReader.java:269)
at org.elasticsearch.hadoop.rest.RestRepository.scroll(RestRepository.java:393)
at org.elasticsearch.hadoop.rest.ScrollQuery.hasNext(ScrollQuery.java:92)
at org.elasticsearch.spark.rdd.AbstractEsRDDIterator.hasNext(AbstractEsRDDIterator.scala:61)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$4.apply(SparkPlan.scala:246)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$4.apply(SparkPlan.scala:240)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:784)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:784)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
at org.apache.spark.scheduler.Task.run(Task.scala:85)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.NumberFormatException: For input string: "161.60"
at java.lang.NumberFormatException.forInputString(NumberFormatException.java:65)
at java.lang.Long.parseLong(Long.java:589)
at java.lang.Long.parseLong(Long.java:631)
at scala.collection.immutable.StringLike$class.toLong(StringLike.scala:277)
at scala.collection.immutable.StringOps.toLong(StringOps.scala:29)
at org.elasticsearch.spark.serialization.ScalaValueReader.parseLong(ScalaValueReader.scala:142)
at org.elasticsearch.spark.serialization.ScalaValueReader$$anonfun$longValue$1.apply(ScalaValueReader.scala:141)
at org.elasticsearch.spark.serialization.ScalaValueReader$$anonfun$longValue$1.apply(ScalaValueReader.scala:141)
at org.elasticsearch.spark.serialization.ScalaValueReader.checkNull(ScalaValueReader.scala:120)
at org.elasticsearch.spark.serialization.ScalaValueReader.longValue(ScalaValueReader.scala:141)
at org.elasticsearch.spark.serialization.ScalaValueReader.readValue(ScalaValueReader.scala:89)
at org.elasticsearch.spark.sql.ScalaRowValueReader.readValue(ScalaEsRowValueReader.scala:46)
at org.elasticsearch.hadoop.serialization.ScrollReader.parseValue(ScrollReader.java:770)
at org.elasticsearch.hadoop.serialization.ScrollReader.read(ScrollReader.java:720)
... 25 more
18/04/25 23:33:53 INFO SparkContext: Invoking stop() from shutdown hook
18/04/25 23:33:53 INFO SparkUI: Stopped Spark web UI at http://10.1.2.244:4040
18/04/25 23:33:53 INFO MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped!
18/04/25 23:33:53 INFO MemoryStore: MemoryStore cleared
18/04/25 23:33:53 INFO BlockManager: BlockManager stopped
18/04/25 23:33:53 INFO BlockManagerMaster: BlockManagerMaster stopped
18/04/25 23:33:53 INFO OutputCommitCoordinator$OutputCommitCoordinatorEndpoint: OutputCommitCoordinator stopped!
18/04/25 23:33:53 INFO SparkContext: Successfully stopped SparkContext
18/04/25 23:33:53 INFO ShutdownHookManager: Shutdown hook called
18/04/25 23:33:53 INFO ShutdownHookManager: Deleting directory
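Regarding the NumberFormatException above: the failure happens inside the connector while it parses the stored value against the Elasticsearch mapping, so one possible workaround (an assumption on my part, relying on the es.read.field.exclude option of elasticsearch-hadoop being available in the connector version used) is to drop the offending field from the read until the index mapping is fixed:

val esdf = spark.read.format("org.elasticsearch.spark.sql")
  .option("es.nodes", "XXXXXX")
  .option("es.port", "80")
  .option("es.query", "?q=*")
  .option("es.nodes.wan.only", "true")
  .option("pushdown", "true")
  // Hypothetical workaround: exclude the field whose mapping (long) disagrees
  // with the stored value ("161.60") so the rest of the document still loads.
  .option("es.read.field.exclude", "tvl_tkt_tot_chrg_amt")
  .load("batch_index/ticket")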

Spring boot - Spark Application: Unable to process a file on its nodes

I have the below setup:
Spark master and slaves configured and running on my local machine.
17/11/01 18:03:52 INFO Utils: Successfully started service 'sparkMaster' on port 7077.
17/11/01 18:03:52 INFO Master: Starting Spark master at spark://127.0.0.1:7077
17/11/01 18:03:52 INFO Master: Running Spark version 2.2.0
17/11/01 18:03:52 INFO Utils: Successfully started service 'MasterUI' on port 8080.
I have a Spring Boot application whose properties file contents look like the below:
spark.home=/usr/local/Cellar/apache-spark/2.2.0/bin/
master.uri=spark://127.0.0.1:7077
@Autowired
SparkConf sparkConf;

public void processFile(String inputFile, String outputFile) {
    JavaSparkContext javaSparkContext;
    SparkContext sc = new SparkContext(sparkConf);
    SerializationWrapper sw = new SerializationWrapper() {
        private static final long serialVersionUID = 1L;

        @Override
        public JavaSparkContext createJavaSparkContext() {
            // TODO Auto-generated method stub
            return JavaSparkContext.fromSparkContext(sc);
        }
    };
    javaSparkContext = sw.createJavaSparkContext();

    JavaRDD<String> lines = javaSparkContext.textFile(inputFile);
    Broadcast<JavaRDD<String>> outputLines;
    outputLines = javaSparkContext.broadcast(lines.map(new Function<String, String>() {
        private static final long serialVersionUID = 1L;

        @Override
        public String call(String arg0) throws Exception {
            // TODO Auto-generated method stub
            return arg0;
        }
    }));
    outputLines.getValue().saveAsTextFile(outputFile);
    //javaSparkContext.close();
}
When I run the code, I get the below error:
17/11/01 18:16:36 INFO TorrentBroadcast: Started reading broadcast variable 2
17/11/01 18:16:36 INFO TransportClientFactory: Successfully created connection to /192.168.0.135:51903 after 1 ms (0 ms spent in bootstraps)
17/11/01 18:16:36 INFO MemoryStore: Block broadcast_2_piece0 stored as bytes in memory (estimated size 24.4 KB, free 366.3 MB)
17/11/01 18:16:36 INFO TorrentBroadcast: Reading broadcast variable 2 took 82 ms
17/11/01 18:16:36 INFO MemoryStore: Block broadcast_2 stored as values in memory (estimated size 67.2 KB, free 366.2 MB)
17/11/01 18:16:36 ERROR Executor: Exception in task 1.0 in stage 0.0 (TID 1)
java.lang.ClassCastException: cannot assign instance of scala.collection.immutable.List$SerializationProxy to field org.apache.spark.rdd.RDD.org$apache$spark$rdd$RDD$$dependencies_ of type scala.collection.Seq in instance of org.apache.spark.rdd.MapPartitionsRDD
at java.io.ObjectStreamClass$FieldReflector.setObjFieldValues(ObjectStreamClass.java:2133)
at java.io.ObjectStreamClass.setObjFieldValues(ObjectStreamClass.java:1305)
at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2251)
at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2169)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2027)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1535)
at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2245)
at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2169)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2027)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1535)
at java.io.ObjectInputStream.readObject(ObjectInputStream.java:422)
at org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:75)
at org.apache.spark.serializer.JavaSerializerInstance.deserialize(JavaSerializer.scala:114)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:80)
at org.apache.spark.scheduler.Task.run(Task.scala:108)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:335)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:748)
17/11/01 18:16:36 ERROR Executor: Exception in task 0.0 in stage 0.0 (TID 0)
The Spring Boot/Spark app should process files based on a REST API call, from which I get the input and output file locations; these locations are shared across the Spark nodes.
Any suggestions to fix the above errors?
I think you should not broadcast a JavaRDD, since an RDD is already distributed among your cluster nodes.
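A rough sketch of that simplification (in Scala for brevity; sc, inputFile and outputFile stand in for the values used in the question):

// Map the RDD and write it out directly; no broadcast is needed because
// the RDD's partitions already live on the worker nodes.
val lines = sc.textFile(inputFile)
val outputLines = lines.map(line => line) // identity transform, as in the original code
outputLines.saveAsTextFile(outputFile)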

Persist RDD as Avro File

I have written this sample program to persist an RDD into an Avro file.
I am using CDH 5.4 with Spark 1.3.
I wrote this .avsc file and then generated code for the User class:
{"namespace": "com.abhi",
"type": "record",
"name": "User",
"fields": [
{"name": "firstname", "type": "string"},
{"name": "lastname", "type": "string"} ]
}
Then I generated the code for User
java -jar ~/Downloads/avro-tools-1.7.7.jar compile schema User.avsc .
Then I wrote my example:
package com.abhi

import org.apache.hadoop.mapreduce.Job
import org.apache.spark.SparkConf
import org.apache.avro.generic.GenericRecord
import org.apache.avro.mapred.AvroKey
import org.apache.avro.mapreduce.{AvroKeyOutputFormat, AvroJob, AvroKeyInputFormat}
import org.apache.hadoop.io.NullWritable
import org.apache.spark.SparkContext

object MySpark {
  def main(args: Array[String]): Unit = {
    val sf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("MySpark")
    val sc = new SparkContext(sf)

    val user1 = new User();
    user1.setFirstname("Test1");
    user1.setLastname("Test2");

    val user2 = new User("Test3", "Test4");

    // Construct via builder
    val user3 = User.newBuilder()
      .setFirstname("Test5")
      .setLastname("Test6")
      .build()

    val list = Array(user1, user2, user3)
    val userRdd = sc.parallelize(list)

    val job: Job = Job.getInstance()
    AvroJob.setOutputKeySchema(job, user1.getSchema)
    val output = "/user/cloudera/users.avro"

    userRdd.map(row => (new AvroKey(row), NullWritable.get()))
      .saveAsNewAPIHadoopFile(
        output,
        classOf[AvroKey[User]],
        classOf[NullWritable],
        classOf[AvroKeyOutputFormat[User]],
        job.getConfiguration)
  }
}
I have two concerns with this code.
Some of the imports are from the old MapReduce API, and I wonder why they are required for Spark code:
import org.apache.hadoop.mapreduce.Job
import org.apache.avro.mapred.AvroKey
import org.apache.avro.mapreduce.{AvroKeyOutputFormat, AvroJob, AvroKeyInputFormat}
The code throws an exception when I submit it to the Hadoop cluster.
It does create an empty directory called /user/cloudera/users.avro in HDFS.
15/11/01 08:20:42 INFO Configuration.deprecation: mapred.output.dir is deprecated. Instead, use mapreduce.output.fileoutputformat.outputdir
15/11/01 08:20:42 INFO output.FileOutputCommitter: File Output Committer Algorithm version is 1
15/11/01 08:20:42 INFO spark.SparkContext: Starting job: saveAsNewAPIHadoopFile at MySpark.scala:52
15/11/01 08:20:42 INFO scheduler.DAGScheduler: Got job 1 (saveAsNewAPIHadoopFile at MySpark.scala:52) with 2 output partitions (allowLocal=false)
15/11/01 08:20:42 INFO scheduler.DAGScheduler: Final stage: Stage 1(saveAsNewAPIHadoopFile at MySpark.scala:52)
15/11/01 08:20:42 INFO scheduler.DAGScheduler: Parents of final stage: List()
15/11/01 08:20:42 INFO scheduler.DAGScheduler: Missing parents: List()
15/11/01 08:20:42 INFO scheduler.DAGScheduler: Submitting Stage 1 (MapPartitionsRDD[2] at map at MySpark.scala:51), which has no missing parents
15/11/01 08:20:42 INFO storage.MemoryStore: ensureFreeSpace(66904) called with curMem=301745, maxMem=280248975
15/11/01 08:20:42 INFO storage.MemoryStore: Block broadcast_2 stored as values in memory (estimated size 65.3 KB, free 266.9 MB)
15/11/01 08:20:42 INFO storage.MemoryStore: ensureFreeSpace(23066) called with curMem=368649, maxMem=280248975
15/11/01 08:20:42 INFO storage.MemoryStore: Block broadcast_2_piece0 stored as bytes in memory (estimated size 22.5 KB, free 266.9 MB)
15/11/01 08:20:42 INFO storage.BlockManagerInfo: Added broadcast_2_piece0 in memory on localhost:34630 (size: 22.5 KB, free: 267.2 MB)
15/11/01 08:20:42 INFO storage.BlockManagerMaster: Updated info of block broadcast_2_piece0
15/11/01 08:20:42 INFO spark.SparkContext: Created broadcast 2 from broadcast at DAGScheduler.scala:839
15/11/01 08:20:42 INFO scheduler.DAGScheduler: Submitting 2 missing tasks from Stage 1 (MapPartitionsRDD[2] at map at MySpark.scala:51)
15/11/01 08:20:42 INFO scheduler.TaskSchedulerImpl: Adding task set 1.0 with 2 tasks
15/11/01 08:20:42 ERROR scheduler.TaskSetManager: Failed to serialize task 1, not attempting to retry it.
java.lang.reflect.InvocationTargetException
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.spark.serializer.SerializationDebugger$ObjectStreamClassMethods$.getObjFieldValues$extension(SerializationDebugger.scala:240)
at org.apache.spark.serializer.SerializationDebugger$SerializationDebugger.visitSerializable(SerializationDebugger.scala:150)
at org.apache.spark.serializer.SerializationDebugger$SerializationDebugger.visit(SerializationDebugger.scala:99)
at org.apache.spark.serializer.SerializationDebugger$.find(SerializationDebugger.scala:58)
at org.apache.spark.serializer.SerializationDebugger$.improveException(SerializationDebugger.scala:39)
at org.apache.spark.serializer.JavaSerializationStream.writeObject(JavaSerializer.scala:47)
at org.apache.spark.serializer.JavaSerializerInstance.serialize(JavaSerializer.scala:80)
at org.apache.spark.scheduler.Task$.serializeWithDependencies(Task.scala:149)
at org.apache.spark.scheduler.TaskSetManager.resourceOffer(TaskSetManager.scala:464)
at org.apache.spark.scheduler.TaskSchedulerImpl$$anonfun$org$apache$spark$scheduler$TaskSchedulerImpl$$resourceOfferSingleTaskSet$1.apply$mcVI$sp(TaskSchedulerImpl.scala:232)
at scala.collection.immutable.Range.foreach$mVc$sp(Range.scala:141)
at org.apache.spark.scheduler.TaskSchedulerImpl.org$apache$spark$scheduler$TaskSchedulerImpl$$resourceOfferSingleTaskSet(TaskSchedulerImpl.scala:227)
at org.apache.spark.scheduler.TaskSchedulerImpl$$anonfun$resourceOffers$3$$anonfun$apply$6.apply(TaskSchedulerImpl.scala:296)
at org.apache.spark.scheduler.TaskSchedulerImpl$$anonfun$resourceOffers$3$$anonfun$apply$6.apply(TaskSchedulerImpl.scala:294)
at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
The problem is that Spark can't serialise your User class; try switching to the Kryo serializer and registering your class with it.
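A minimal sketch of that setup, assuming the generated com.abhi.User class from the question; registering the class through SparkConf.registerKryoClasses is one common way to do it (available since Spark 1.2, so it applies to the Spark 1.3 used here):

import org.apache.spark.{SparkConf, SparkContext}

// Sketch only: switch data serialization to Kryo and register the
// Avro-generated User class so Spark can ship it to executors.
val sf = new SparkConf()
  .setMaster("local[2]")
  .setAppName("MySpark")
  .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
  .registerKryoClasses(Array(classOf[User]))
val sc = new SparkContext(sf)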
