Spark 3.1.1 | Unable to run job: IDENTIFIER expected instead of '['

I'm trying to run code that runs fine in Spark 2.4.4, and I'm getting the following error:
21/08/20 08:35:05 ERROR CodeGenerator: failed to compile: org.codehaus.commons.compiler.CompileException: File 'generated.java', Line 75, Column 32: IDENTIFIER expected instead of '['
org.codehaus.commons.compiler.CompileException: File 'generated.java', Line 75, Column 32: IDENTIFIER expected instead of '['
at org.codehaus.janino.TokenStreamImpl.read(TokenStreamImpl.java:196)
at org.codehaus.janino.Parser.read(Parser.java:3705)
at org.codehaus.janino.Parser.parseQualifiedIdentifier(Parser.java:446)
at org.codehaus.janino.Parser.parseReferenceType(Parser.java:2569)
at org.codehaus.janino.Parser.parseType(Parser.java:2549)
at org.codehaus.janino.Parser.parseFormalParameter(Parser.java:1688)
at org.codehaus.janino.Parser.parseFormalParameterList(Parser.java:1639)
at org.codehaus.janino.Parser.parseFormalParameters(Parser.java:1620)
at org.codehaus.janino.Parser.parseMethodDeclarationRest(Parser.java:1518)
at org.codehaus.janino.Parser.parseClassBodyDeclaration(Parser.java:1028)
at org.codehaus.janino.Parser.parseClassBody(Parser.java:841)
at org.codehaus.janino.Parser.parseClassDeclarationRest(Parser.java:736)
at org.codehaus.janino.Parser.parseClassBodyDeclaration(Parser.java:941)
at org.codehaus.janino.ClassBodyEvaluator.cook(ClassBodyEvaluator.java:234)
at org.codehaus.janino.SimpleCompiler.cook(SimpleCompiler.java:205)
at org.codehaus.commons.compiler.Cookable.cook(Cookable.java:80)
at org.apache.spark.sql.catalyst.expressions.codegen.CodeGenerator$.org$apache$spark$sql$catalyst$expressions$codegen$CodeGenerator$$doCompile(CodeGenerator.scala:1427)
at org.apache.spark.sql.catalyst.expressions.codegen.CodeGenerator$$anon$1.load(CodeGenerator.scala:1524)
at org.apache.spark.sql.catalyst.expressions.codegen.CodeGenerator$$anon$1.load(CodeGenerator.scala:1521)
at org.sparkproject.guava.cache.LocalCache$LoadingValueReference.loadFuture(LocalCache.java:3599)
at org.sparkproject.guava.cache.LocalCache$Segment.loadSync(LocalCache.java:2379)
at org.sparkproject.guava.cache.LocalCache$Segment.lockedGetOrLoad(LocalCache.java:2342)
at org.sparkproject.guava.cache.LocalCache$Segment.get(LocalCache.java:2257)
at org.sparkproject.guava.cache.LocalCache.get(LocalCache.java:4000)
at org.sparkproject.guava.cache.LocalCache.getOrLoad(LocalCache.java:4004)
at org.sparkproject.guava.cache.LocalCache$LocalLoadingCache.get(LocalCache.java:4874)
at org.apache.spark.sql.catalyst.expressions.codegen.CodeGenerator$.compile(CodeGenerator.scala:1375)
at org.apache.spark.sql.execution.WholeStageCodegenExec.liftedTree1$1(WholeStageCodegenExec.scala:721)
at org.apache.spark.sql.execution.WholeStageCodegenExec.doExecute(WholeStageCodegenExec.scala:720)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:185)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:223)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:220)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:181)
at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:134)
at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:133)
at org.apache.spark.sql.execution.streaming.sources.ForeachBatchSink.addBatch(ForeachBatchSink.scala:33)
at org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runBatch$16(MicroBatchExecution.scala:586)
at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:107)
at org.apache.spark.sql.execution.SQLExecution$.withTracker(SQLExecution.scala:232)
at org.apache.spark.sql.execution.SQLExecution$.executeQuery$1(SQLExecution.scala:110)
at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$6(SQLExecution.scala:135)
at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:107)
at org.apache.spark.sql.execution.SQLExecution$.withTracker(SQLExecution.scala:232)
at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:135)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:253)
at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:134)
at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:772)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:68)
at org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runBatch$15(MicroBatchExecution.scala:584)
at org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken(ProgressReporter.scala:357)
at org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken$(ProgressReporter.scala:355)
at org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:68)
at org.apache.spark.sql.execution.streaming.MicroBatchExecution.runBatch(MicroBatchExecution.scala:584)
at org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runActivatedStream$2(MicroBatchExecution.scala:226)
at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
at org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken(ProgressReporter.scala:357)
at org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken$(ProgressReporter.scala:355)
at org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:68)
at org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runActivatedStream$1(MicroBatchExecution.scala:194)
at org.apache.spark.sql.execution.streaming.OneTimeExecutor.execute(TriggerExecutor.scala:39)
at org.apache.spark.sql.execution.streaming.MicroBatchExecution.runActivatedStream(MicroBatchExecution.scala:188)
at org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runStream(StreamExecution.scala:333)
at org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.run(StreamExecution.scala:244)
I read something in the official documentation regarding the way Spark 3.1.1 handles maps, arrays, etc.:
In Spark 3.1, structs and maps are wrapped by the {} brackets in casting them to strings. For instance, the show() action and the CAST expression use such brackets. In Spark 3.0 and earlier, the [] brackets are used for the same purpose. To restore the behavior before Spark 3.1, you can set spark.sql.legacy.castComplexTypesToString.enabled to true.

In Spark 3.1, NULL elements of structures, arrays and maps are converted to “null” in casting them to strings. In Spark 3.0 or earlier, NULL elements are converted to empty strings. To restore the behavior before Spark 3.1, you can set spark.sql.legacy.castComplexTypesToString.enabled to true.
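A minimal sketch of how that legacy flag can be set on the session (assuming a SparkSession named spark; it can also be passed as a --conf at submit time):

import org.apache.spark.sql.SparkSession

// Sketch only: spark stands for the application's existing SparkSession.
val spark = SparkSession.builder().appName("legacy-cast-sketch").getOrCreate()

// Restore the pre-3.1 behaviour for casting structs/maps/arrays to strings.
spark.conf.set("spark.sql.legacy.castComplexTypesToString.enabled", "true")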
I have enabled this, but still no luck. In the meantime I'm removing all the maps and arrays from my dataframe.
Any ideas?

Related

Alter table tablename CONCATENATE error via the Databricks notebook

I want to schedule a Databricks notebook that merges small ORC files into one bigger ORC file on a daily basis for a particular Hive table. I'm looking to implement this using Spark, but I'm currently stuck on the error shown below.
My databricks runtime: 6.3 (includes Apache Spark 2.4.4, Scala 2.11)
Any pointers would be great.
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
val spark = SparkSession.builder().appName("Hive small ORC files merge").enableHiveSupport().getOrCreate()
spark.sql("ALTER TABLE [TABLE_NAME] CONCATENATE")
Error:
Operation not allowed: ALTER TABLE CONCATENATE(line 1, pos 0)

== SQL ==
ALTER TABLE CONCATENATE
^^^
at org.apache.spark.sql.catalyst.parser.ParserUtils$.operationNotAllowed(ParserUtils.scala:43)
at org.apache.spark.sql.execution.SparkSqlAstBuilder$$anonfun$visitFailNativeCommand$1.apply(SparkSqlParser.scala:1135)
at org.apache.spark.sql.execution.SparkSqlAstBuilder$$anonfun$visitFailNativeCommand$1.apply(SparkSqlParser.scala:1126)
at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:110)
at org.apache.spark.sql.execution.SparkSqlAstBuilder.visitFailNativeCommand(SparkSqlParser.scala:1126)
at org.apache.spark.sql.execution.SparkSqlAstBuilder.visitFailNativeCommand(SparkSqlParser.scala:62)
at org.apache.spark.sql.catalyst.parser.SqlBaseParser$FailNativeCommandContext.accept(SqlBaseParser.java:831)
at org.antlr.v4.runtime.tree.AbstractParseTreeVisitor.visit(AbstractParseTreeVisitor.java:18)
at org.apache.spark.sql.catalyst.parser.AstBuilder$$anonfun$visitSingleStatement$1.apply(AstBuilder.scala:74)
at org.apache.spark.sql.catalyst.parser.AstBuilder$$anonfun$visitSingleStatement$1.apply(AstBuilder.scala:74)
at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:110)
at org.apache.spark.sql.catalyst.parser.AstBuilder.visitSingleStatement(AstBuilder.scala:73)
at org.apache.spark.sql.catalyst.parser.AbstractSqlParser$$anonfun$parsePlan$1.apply(ParseDriver.scala:70)
at org.apache.spark.sql.catalyst.parser.AbstractSqlParser$$anonfun$parsePlan$1.apply(ParseDriver.scala:69)
at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.parse(ParseDriver.scala:100)
at org.apache.spark.sql.execution.SparkSqlParser.parse(SparkSqlParser.scala:55)
at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.parsePlan(ParseDriver.scala:69)
at com.databricks.sql.parser.DatabricksSqlParser$$anonfun$parsePlan$1.apply(DatabricksSqlParser.scala:64)
at com.databricks.sql.parser.DatabricksSqlParser$$anonfun$parsePlan$1.apply(DatabricksSqlParser.scala:61)
at com.databricks.sql.parser.DatabricksSqlParser.parse(DatabricksSqlParser.scala:84)
at com.databricks.sql.parser.DatabricksSqlParser.parsePlan(DatabricksSqlParser.scala:61)
at org.apache.spark.sql.SparkSession$$anonfun$6.apply(SparkSession.scala:694)
at org.apache.spark.sql.SparkSession$$anonfun$6.apply(SparkSession.scala:694)
at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:111)
at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:693)
at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:716)
at line341589a136f246f788b6b288061c96ae31.$read$$iw$$iw$$iw$$iw$$iw$$iw.<init>(command-4297307810790143:4)
at line341589a136f246f788b6b288061c96ae31.$read$$iw$$iw$$iw$$iw$$iw.<init>(command-4297307810790143:50)
at line341589a136f246f788b6b288061c96ae31.$read$$iw$$iw$$iw$$iw.<init>(command-4297307810790143:52)
at line341589a136f246f788b6b288061c96ae31.$read$$iw$$iw$$iw.<init>(command-4297307810790143:54)
at line341589a136f246f788b6b288061c96ae31.$read$$iw$$iw.<init>(command-4297307810790143:56)
at line341589a136f246f788b6b288061c96ae31.$read$$iw.<init>(command-4297307810790143:58)
at line341589a136f246f788b6b288061c96ae31.$read.<init>(command-4297307810790143:60)
at line341589a136f246f788b6b288061c96ae31.$read$.<init>(command-4297307810790143:64)
at line341589a136f246f788b6b288061c96ae31.$read$.<clinit>(command-4297307810790143)
at line341589a136f246f788b6b288061c96ae31.$eval$.$print$lzycompute(<notebook>:7)
at line341589a136f246f788b6b288061c96ae31.$eval$.$print(<notebook>:6)
at line341589a136f246f788b6b288061c96ae31.$eval.$print(<notebook>)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at scala.tools.nsc.interpreter.IMain$ReadEvalPrint.call(IMain.scala:793)
at scala.tools.nsc.interpreter.IMain$Request.loadAndRun(IMain.scala:1054)
at scala.tools.nsc.interpreter.IMain$WrappedRequest$$anonfun$loadAndRunReq$1.apply(IMain.scala:645)
at scala.tools.nsc.interpreter.IMain$WrappedRequest$$anonfun$loadAndRunReq$1.apply(IMain.scala:644)
at scala.reflect.internal.util.ScalaClassLoader$class.asContext(ScalaClassLoader.scala:31)
at scala.reflect.internal.util.AbstractFileClassLoader.asContext(AbstractFileClassLoader.scala:19)
at scala.tools.nsc.interpreter.IMain$WrappedRequest.loadAndRunReq(IMain.scala:644)
at scala.tools.nsc.interpreter.IMain.interpret(IMain.scala:576)
at scala.tools.nsc.interpreter.IMain.interpret(IMain.scala:572)
at com.databricks.backend.daemon.driver.DriverILoop.execute(DriverILoop.scala:215)
at com.databricks.backend.daemon.driver.ScalaDriverLocal$$anonfun$repl$1.apply$mcV$sp(ScalaDriverLocal.scala:202)
at com.databricks.backend.daemon.driver.ScalaDriverLocal$$anonfun$repl$1.apply(ScalaDriverLocal.scala:202)
at com.databricks.backend.daemon.driver.ScalaDriverLocal$$anonfun$repl$1.apply(ScalaDriverLocal.scala:202)
at com.databricks.backend.daemon.driver.DriverLocal$TrapExitInternal$.trapExit(DriverLocal.scala:699)
at com.databricks.backend.daemon.driver.DriverLocal$TrapExit$.apply(DriverLocal.scala:652)
at com.databricks.backend.daemon.driver.ScalaDriverLocal.repl(ScalaDriverLocal.scala:202)
at com.databricks.backend.daemon.driver.DriverLocal$$anonfun$execute$9.apply(DriverLocal.scala:385)
at com.databricks.backend.daemon.driver.DriverLocal$$anonfun$execute$9.apply(DriverLocal.scala:362)
at com.databricks.logging.UsageLogging$$anonfun$withAttributionContext$1.apply(UsageLogging.scala:251)
at scala.util.DynamicVariable.withValue(DynamicVariable.scala:58)
at com.databricks.logging.UsageLogging$class.withAttributionContext(UsageLogging.scala:246)
at com.databricks.backend.daemon.driver.DriverLocal.withAttributionContext(DriverLocal.scala:49)
at com.databricks.logging.UsageLogging$class.withAttributionTags(UsageLogging.scala:288)
at com.databricks.backend.daemon.driver.DriverLocal.withAttributionTags(DriverLocal.scala:49)
at com.databricks.backend.daemon.driver.DriverLocal.execute(DriverLocal.scala:362)
at com.databricks.backend.daemon.driver.DriverWrapper$$anonfun$tryExecutingCommand$2.apply(DriverWrapper.scala:644)
at com.databricks.backend.daemon.driver.DriverWrapper$$anonfun$tryExecutingCommand$2.apply(DriverWrapper.scala:644)
at scala.util.Try$.apply(Try.scala:192)
at com.databricks.backend.daemon.driver.DriverWrapper.tryExecutingCommand(DriverWrapper.scala:639)
at com.databricks.backend.daemon.driver.DriverWrapper.getCommandOutputAndError(DriverWrapper.scala:485)
at com.databricks.backend.daemon.driver.DriverWrapper.executeCommand(DriverWrapper.scala:597)
at com.databricks.backend.daemon.driver.DriverWrapper.runInnerLoop(DriverWrapper.scala:390)
at com.databricks.backend.daemon.driver.DriverWrapper.runInner(DriverWrapper.scala:337)
at com.databricks.backend.daemon.driver.DriverWrapper.run(DriverWrapper.scala:219)
at java.lang.Thread.run(Thread.java:748)
As per the Spark source code, the ALTER TABLE ... CONCATENATE option is not implemented or supported as of now. Please check the code below for more information:
Spark SQL Parser
Spark Unsupported Hive Native Commands
This command works from Hive only:
ALTER TABLE <table_name> CONCATENATE;
It does not work from Spark yet.
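As a hedged workaround sketch (not part of the original answer), the small ORC files can be compacted from Spark itself by reading the table, reducing the number of output partitions, and writing to a staging table; the table names and partition count below are placeholders:

import org.apache.spark.sql.{SaveMode, SparkSession}

val spark = SparkSession.builder()
  .appName("Hive small ORC files merge")
  .enableHiveSupport()
  .getOrCreate()

// Placeholder table names: replace my_db.my_table and my_db.my_table_compacted.
val df = spark.table("my_db.my_table")

// coalesce(1) yields a single ORC file per write; for large tables pick a
// partition count that matches the desired file size rather than 1.
df.coalesce(1)
  .write
  .mode(SaveMode.Overwrite)
  .format("orc")
  .saveAsTable("my_db.my_table_compacted")

Swapping the compacted table back in place of the original would then be a separate step.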

Getting NullPointerException while performing operations on DataFrame in Spark

I am using the following code to create a DataFrame from an RDD. I am able to perform operations on the RDD, and the RDD is not empty.
I tried the following two approaches.
With both I am getting the same exception.
Approach 1: Build the Dataset using sparkSession.createDataFrame().
System.out.println("RDD Count: " + rdd.count());
Dataset<Row> rows = applicationSession
.getSparkSession().createDataFrame(rdd, data.getSchema()).toDF(data.convertListToSeq(data.getColumnNames()));
rows.createOrReplaceTempView(createStagingTableName(sparkTableName));
rows.show();
rows.printSchema();
Approach 2: Use the HiveContext to create the Dataset.
System.out.println("RDD Count: " + rdd.count());
System.out.println("Create view using HiveContext..");
Dataset<Row> rows = applicationSession.gethiveContext().applySchema(rdd, data.getSchema());
I am able to print the schema for the above dataset using both approaches.
I'm not sure what exactly is causing the null pointer exception.
The show() method internally invokes take(), which is what throws the null pointer exception.
But why is this dataset null? If the RDD contains values, it should not be null.
This is strange behaviour.
Below are the logs:
RDD Count: 35
Also, I am able to run the above code in local mode without any exception; it works fine.
As soon as I deploy this code on YARN, I start getting the following exception.
I am able to create the DataFrame, and I am even able to register a view for it.
But as soon as I perform rows.show() or rows.count() on this dataset, I get the following error.
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1517)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1505)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1504)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1504)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:814)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:814)
at scala.Option.foreach(Option.scala:257)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:814)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1732)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1687)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1676)
at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:630)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2029)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2050)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2069)
at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:336)
at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:38)
at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collectFromPlan(Dataset.scala:2861)
at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2150)
at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2150)
at org.apache.spark.sql.Dataset$$anonfun$55.apply(Dataset.scala:2842)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:65)
at org.apache.spark.sql.Dataset.withAction(Dataset.scala:2841)
at org.apache.spark.sql.Dataset.head(Dataset.scala:2150)
at org.apache.spark.sql.Dataset.take(Dataset.scala:2363)
at org.apache.spark.sql.Dataset.showString(Dataset.scala:241)
at org.apache.spark.sql.Dataset.show(Dataset.scala:637)
at org.apache.spark.sql.Dataset.show(Dataset.scala:596)
at org.apache.spark.sql.Dataset.show(Dataset.scala:605)
Caused by: java.lang.NullPointerException
at org.apache.spark.sql.SparkSession$$anonfun$3.apply(SparkSession.scala:469)
at org.apache.spark.sql.SparkSession$$anonfun$3.apply(SparkSession.scala:469)
at scala.collection.Iterator$$anon$11.next(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.next(Iterator.scala:409)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:235)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:228)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:827)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:827)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
at org.apache.spark.scheduler.Task.run(Task.scala:108)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Am I doing anything wrong here?
Please suggest.
Can you post the schema for the DataFrame? The issue is likely with the schema string you are using and the separator you are using to split that schema string.
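For what it's worth, a Scala sketch (the question's code is Java, but the same idea applies) of building the schema as an explicit StructType instead of splitting a schema string, which sidesteps the separator problem entirely; the column names and sample rows are hypothetical:

import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.{StringType, StructField, StructType}

// local[*] is only for running the sketch standalone.
val spark = SparkSession.builder().appName("schema-sketch").master("local[*]").getOrCreate()

// Hypothetical columns: the real names and types would come from data.getSchema().
val schema = StructType(Seq(
  StructField("col_a", StringType, nullable = true),
  StructField("col_b", StringType, nullable = true)
))

// rddOfRows stands in for the existing RDD[Row]; each Row must line up with the schema.
val rddOfRows = spark.sparkContext.parallelize(Seq(Row("a1", "b1"), Row("a2", "b2")))

val rows = spark.createDataFrame(rddOfRows, schema)
rows.show()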

Spark SQL coalesce function fails to evaluate

I am doing an outer join between a source dataframe and a smaller "overrides" dataframe, and I'd like to use the coalesce function:
val outputColumns: Array[Column] = dimensionColumns.map(dc => etlDf(dc)).union(attributeColumns.map(ac => coalesce(overrideDf(ac), etlDf(ac))))
etlDf.join(overrideDf, childColumns, "left").select(outputColumns:_*)
When it comes time to write the resulting dataframe to a parquet file, I am receiving the following exception:
org.apache.spark.sql.AnalysisException: Attribute name "coalesce(top_customer_fg, top_customer_fg)" contains invalid character(s) among " ,;{}()\n\t=". Please use alias to rename it.;
at org.apache.spark.sql.execution.datasources.parquet.ParquetSchemaConverter$.checkConversionRequirement(ParquetSchemaConverter.scala:581)
at org.apache.spark.sql.execution.datasources.parquet.ParquetSchemaConverter$.checkFieldName(ParquetSchemaConverter.scala:567)
at org.apache.spark.sql.execution.datasources.parquet.ParquetWriteSupport$$anonfun$setSchema$2.apply(ParquetWriteSupport.scala:431)
at org.apache.spark.sql.execution.datasources.parquet.ParquetWriteSupport$$anonfun$setSchema$2.apply(ParquetWriteSupport.scala:431)
at scala.collection.immutable.List.foreach(List.scala:381)
at org.apache.spark.sql.execution.datasources.parquet.ParquetWriteSupport$.setSchema(ParquetWriteSupport.scala:431)
at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat.prepareWrite(ParquetFileFormat.scala:115)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:108)
at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand.run(InsertIntoHadoopFsRelationCommand.scala:101)
at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:58)
at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:56)
at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:74)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:114)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:114)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:135)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:132)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:113)
at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:87)
at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:87)
at org.apache.spark.sql.execution.datasources.DataSource.writeInFileFormat(DataSource.scala:484)
at org.apache.spark.sql.execution.datasources.DataSource.write(DataSource.scala:520)
at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:215)
at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:198)
at org.apache.spark.sql.DataFrameWriter.parquet(DataFrameWriter.scala:494)
at com.mycompany.customattributes.ProgramImplementation$StandardProgram.createAttributeFiles(ProgramImplementation.scala:63)
So even though the coalesce function returns a Column, it appears to be evaluated as a literal column name. This seems unexpected to me.
Is there a syntax mistake I'm making here, or do I need to take a different approach?
Thanks.
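The error message itself points at the fix: the auto-generated name coalesce(top_customer_fg, top_customer_fg) contains parentheses, which the Parquet writer rejects, so each coalesced column needs an alias. A sketch against the question's own code (dimensionColumns, attributeColumns, etlDf, overrideDf and childColumns are assumed as posted):

import org.apache.spark.sql.Column
import org.apache.spark.sql.functions.coalesce

// Alias each coalesced column back to its attribute name so the Parquet writer
// sees a plain field name instead of "coalesce(x, x)".
val outputColumns: Array[Column] =
  dimensionColumns.map(dc => etlDf(dc))
    .union(attributeColumns.map(ac => coalesce(overrideDf(ac), etlDf(ac)).alias(ac)))

etlDf.join(overrideDf, childColumns, "left").select(outputColumns: _*)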

Spark Dataframe IN clause is throwing error

I am trying to apply an IN clause to a Spark DataFrame:
scala> val filteredDF = resultDF.select("role_id","role","full_name").filter(upper(resultDF("role")).isin(List("DIRECTOR","ACTOR")) )
While trying the above command, I am getting the following error:
java.lang.RuntimeException: Unsupported literal type class scala.collection.immutable.$colon$colon List(DIRECTOR, ACTOR)
at org.apache.spark.sql.catalyst.expressions.Literal$.apply(literals.scala:49)
at org.apache.spark.sql.functions$.lit(functions.scala:89)
at org.apache.spark.sql.Column$$anonfun$isin$1.apply(Column.scala:642)
at org.apache.spark.sql.Column$$anonfun$isin$1.apply(Column.scala:642)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
at scala.collection.mutable.WrappedArray.foreach(WrappedArray.scala:34)
at scala.collection.TraversableLike$class.map(TraversableLike.scala:244)
at scala.collection.AbstractTraversable.map(Traversable.scala:105)
at org.apache.spark.sql.Column.isin(Column.scala:642)
Could someone help me understand why I am getting this error and how I can fix it?
You need to pass values as separate arguments to isin:
.isin("DIRECTOR", "ACTOR")
Or use varargs syntax:
.isin(List("DIRECTOR", "ACTOR"): _*)
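Put back into the question's line, the corrected filter would look roughly like this (resultDF and its columns are taken from the question):

import org.apache.spark.sql.functions.upper

// Expand the List into varargs so each value becomes its own literal.
val filteredDF = resultDF
  .select("role_id", "role", "full_name")
  .filter(upper(resultDF("role")).isin(List("DIRECTOR", "ACTOR"): _*))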

Spark 2.0 with spark.read.text Expected scheme-specific part at index 3: s3: error

I am running into a weird issue with Spark 2.0, using the SparkSession to load a text file. Currently my Spark config looks like:
val sparkConf = new SparkConf().setAppName("name-here")
sparkConf.registerKryoClasses(Array(Class.forName("org.apache.hadoop.io.LongWritable"), Class.forName("org.apache.hadoop.io.Text")))
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
val spark = SparkSession.builder()
.config(sparkConf)
.getOrCreate()
spark.sparkContext.hadoopConfiguration.set("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
spark.sparkContext.hadoopConfiguration.set("fs.s3a.enableServerSideEncryption", "true")
spark.sparkContext.hadoopConfiguration.set("mapreduce.fileoutputcommitter.algorithm.version", "2")
If I load an s3a file through an RDD, it works fine. However, if I try to use something like:
val blah = SparkConfig.spark.read.text("s3a://bucket-name/*/*.txt")
.select(input_file_name, col("value"))
.drop("value")
.distinct()
val x = blah.collect()
println(blah.head().get(0))
println(x.size)
I get an exception that says: java.net.URISyntaxException: Expected scheme-specific part at index 3: s3:
Do I need to add some additional s3a configuration for the SQLContext or SparkSession? I haven't found any question or online resource that specifies this. What is weird is that the job seems to run for 10 minutes, but then fails with this exception. Again, using the same bucket and everything, a regular load of an RDD has no issues.
The other weird thing is that it is complaining about s3 and not s3a. I have triple checked my prefix, and it always says s3a.
Edit: I checked both s3a and s3; both throw the same exception.
17/04/06 21:29:14 ERROR ApplicationMaster: User class threw exception: java.lang.IllegalArgumentException: java.net.URISyntaxException: Expected scheme-specific part at index 3: s3:
java.lang.IllegalArgumentException: java.net.URISyntaxException: Expected scheme-specific part at index 3: s3:
at org.apache.hadoop.fs.Path.initialize(Path.java:205)
at org.apache.hadoop.fs.Path.<init>(Path.java:171)
at org.apache.hadoop.fs.Path.<init>(Path.java:93)
at org.apache.hadoop.fs.Globber.glob(Globber.java:240)
at org.apache.hadoop.fs.FileSystem.globStatus(FileSystem.java:1732)
at org.apache.hadoop.fs.FileSystem.globStatus(FileSystem.java:1713)
at org.apache.spark.deploy.SparkHadoopUtil.globPath(SparkHadoopUtil.scala:237)
at org.apache.spark.deploy.SparkHadoopUtil.globPathIfNecessary(SparkHadoopUtil.scala:243)
at org.apache.spark.sql.execution.datasources.DataSource$$anonfun$14.apply(DataSource.scala:374)
at org.apache.spark.sql.execution.datasources.DataSource$$anonfun$14.apply(DataSource.scala:370)
at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)
at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)
at scala.collection.immutable.List.foreach(List.scala:381)
at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)
at scala.collection.immutable.List.flatMap(List.scala:344)
at org.apache.spark.sql.execution.datasources.DataSource.resolveRelation(DataSource.scala:370)
at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:152)
at org.apache.spark.sql.DataFrameReader.text(DataFrameReader.scala:506)
at org.apache.spark.sql.DataFrameReader.text(DataFrameReader.scala:486)
at com.omitted.omitted.jobs.Omitted$.doThings(Omitted.scala:18)
at com.omitted.omitted.jobs.Omitted$.main(Omitted.scala:93)
at com.omitted.omitted.jobs.Omitted.main(Omitted.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.spark.deploy.yarn.ApplicationMaster$$anon$2.run(ApplicationMaster.scala:637)
Caused by: java.net.URISyntaxException: Expected scheme-specific part at index 3: s3:
at java.net.URI$Parser.fail(URI.java:2848)
at java.net.URI$Parser.failExpecting(URI.java:2854)
at java.net.URI$Parser.parse(URI.java:3057)
at java.net.URI.<init>(URI.java:746)
at org.apache.hadoop.fs.Path.initialize(Path.java:202)
... 26 more
17/04/06 21:29:14 INFO ApplicationMaster: Final app status: FAILED, exitCode: 15, (reason: User class threw exception: java.lang.IllegalArgumentException: java.net.URISyntaxException: Expected scheme-specific part at index 3: s3:)
This should work:
Get the right JARs on your classpath: Spark built for Hadoop 2.7, the matching hadoop-aws JAR, aws-java-sdk-1.7.4.jar (exactly this version) and joda-time-2.9.3.jar (or a later version).
You shouldn't need to set the fs.s3a.impl value, as that's done in the Hadoop default settings; if you do find yourself setting it, it's a sign of a problem.
What's the full stack trace?
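For illustration, a build.sbt sketch of that dependency set; the Spark and Hadoop versions below are assumptions and must match the Hadoop build your Spark distribution was compiled against:

// Versions are assumptions: align hadoop-aws with the cluster's Hadoop 2.7.x
// build, and keep aws-java-sdk pinned to exactly 1.7.4 for that Hadoop line.
libraryDependencies ++= Seq(
  "org.apache.spark"  %% "spark-sql"    % "2.0.2" % "provided",
  "org.apache.hadoop" %  "hadoop-aws"   % "2.7.3",
  "com.amazonaws"     %  "aws-java-sdk" % "1.7.4",
  "joda-time"         %  "joda-time"    % "2.9.3"
)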
