I need to detect the language of a text and then translate that text using PySpark. I could not find any built-in functions for this in PySpark, so I created my own UDFs.
Language detection
from textblob import TextBlob
from pyspark.sql.functions import udf, col
from pyspark.sql.types import StringType

def detectlang(string):
    b = TextBlob(string)
    return b.detect_language()
detectlang_udf = udf(detectlang)
Translation
def translate(string):
    trans = Translator()
    return trans.translate(string).text
translate_udf = udf(translate, StringType())
However, when I call these functions and then ask for the result, I get the following error:
result = dict_comments[13].withColumn("lang", detectlang_udf(col('Text')))
result.show()
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 15.0 failed 1 times, most recent failure: Lost task 0.0 in stage 15.0 (TID 15, localhost, executor driver): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
Edit (full error)
---------------------------------------------------------------------------
Py4JJavaError Traceback (most recent call last)
<command-256375544159477> in <module>
1 result = dict_comments[13].withColumn("lang", detectlang_udf(col('Text')))
----> 2 result.show()
/databricks/spark/python/pyspark/sql/dataframe.py in show(self, n, truncate, vertical)
379 """
380 if isinstance(truncate, bool) and truncate:
--> 381 print(self._jdf.showString(n, 20, vertical))
382 else:
383 print(self._jdf.showString(n, int(truncate), vertical))
/databricks/spark/python/lib/py4j-0.10.7-src.zip/py4j/java_gateway.py in __call__(self, *args)
1255 answer = self.gateway_client.send_command(command)
1256 return_value = get_return_value(
-> 1257 answer, self.gateway_client, self.target_id, self.name)
1258
1259 for temp_arg in temp_args:
/databricks/spark/python/pyspark/sql/utils.py in deco(*a, **kw)
61 def deco(*a, **kw):
62 try:
---> 63 return f(*a, **kw)
64 except py4j.protocol.Py4JJavaError as e:
65 s = e.java_exception.toString()
/databricks/spark/python/lib/py4j-0.10.7-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
326 raise Py4JJavaError(
327 "An error occurred while calling {0}{1}{2}.\n".
--> 328 format(target_id, ".", name), value)
329 else:
330 raise Py4JError(
Py4JJavaError: An error occurred while calling o872.showString.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 15.0 failed 1 times, most recent failure: Lost task 0.0 in stage 15.0 (TID 15, localhost, executor driver): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/databricks/spark/python/pyspark/worker.py", line 480, in main
process()
File "/databricks/spark/python/pyspark/worker.py", line 472, in process
serializer.dump_stream(out_iter, outfile)
File "/databricks/spark/python/pyspark/serializers.py", line 456, in dump_stream
self.serializer.dump_stream(self._batched(iterator), stream)
File "/databricks/spark/python/pyspark/serializers.py", line 149, in dump_stream
for obj in iterator:
File "/databricks/spark/python/pyspark/serializers.py", line 445, in _batched
for item in iterator:
File "<string>", line 1, in <lambda>
File "/databricks/spark/python/pyspark/worker.py", line 87, in <lambda>
return lambda *a: f(*a)
File "/databricks/spark/python/pyspark/util.py", line 99, in wrapper
return f(*args, **kwargs)
File "<command-256375544159470>", line 3, in detectlang
File "/databricks/python/lib/python3.7/site-packages/textblob/blob.py", line 568, in detect_language
return self.translator.detect(self.raw)
File "/databricks/python/lib/python3.7/site-packages/textblob/translate.py", line 69, in detect
raise TranslatorError('Must provide a string with at least 3 characters.')
textblob.exceptions.TranslatorError: Must provide a string with at least 3 characters.
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:534)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:81)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:64)
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:488)
at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage2.processNext(Unknown Source)
at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$13$$anon$1.hasNext(WholeStageCodegenExec.scala:640)
at org.apache.spark.sql.execution.collect.UnsafeRowBatchUtils$.encodeUnsafeRows(UnsafeRowBatchUtils.scala:62)
at org.apache.spark.sql.execution.collect.Collector$$anonfun$2.apply(Collector.scala:159)
at org.apache.spark.sql.execution.collect.Collector$$anonfun$2.apply(Collector.scala:158)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.doRunTask(Task.scala:140)
at org.apache.spark.scheduler.Task.run(Task.scala:113)
at org.apache.spark.executor.Executor$TaskRunner$$anonfun$13.apply(Executor.scala:528)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1526)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:534)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:2360)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:2348)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:2347)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2347)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:1101)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:1101)
at scala.Option.foreach(Option.scala:257)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1101)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2579)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2527)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2515)
at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:896)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2280)
at org.apache.spark.sql.execution.collect.Collector.runSparkJobs(Collector.scala:270)
at org.apache.spark.sql.execution.collect.Collector.collect(Collector.scala:280)
at org.apache.spark.sql.execution.collect.Collector$.collect(Collector.scala:80)
at org.apache.spark.sql.execution.collect.Collector$.collect(Collector.scala:86)
at org.apache.spark.sql.execution.ResultCacheManager.getOrComputeResult(ResultCacheManager.scala:508)
at org.apache.spark.sql.execution.CollectLimitExec.executeCollectResult(limit.scala:55)
at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collectResult(Dataset.scala:2889)
at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collectFromPlan(Dataset.scala:3501)
at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2618)
at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2618)
at org.apache.spark.sql.Dataset$$anonfun$54.apply(Dataset.scala:3485)
at org.apache.spark.sql.Dataset$$anonfun$54.apply(Dataset.scala:3480)
at org.apache.spark.sql.execution.SQLExecution$$anonfun$withCustomExecutionEnv$1.apply(SQLExecution.scala:111)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:240)
at org.apache.spark.sql.execution.SQLExecution$.withCustomExecutionEnv(SQLExecution.scala:97)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:170)
at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$withAction(Dataset.scala:3480)
at org.apache.spark.sql.Dataset.head(Dataset.scala:2618)
at org.apache.spark.sql.Dataset.take(Dataset.scala:2832)
at org.apache.spark.sql.Dataset.getRows(Dataset.scala:265)
at org.apache.spark.sql.Dataset.showString(Dataset.scala:302)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:380)
at py4j.Gateway.invoke(Gateway.java:295)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:251)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/databricks/spark/python/pyspark/worker.py", line 480, in main
process()
File "/databricks/spark/python/pyspark/worker.py", line 472, in process
serializer.dump_stream(out_iter, outfile)
File "/databricks/spark/python/pyspark/serializers.py", line 456, in dump_stream
self.serializer.dump_stream(self._batched(iterator), stream)
File "/databricks/spark/python/pyspark/serializers.py", line 149, in dump_stream
for obj in iterator:
File "/databricks/spark/python/pyspark/serializers.py", line 445, in _batched
for item in iterator:
File "<string>", line 1, in <lambda>
File "/databricks/spark/python/pyspark/worker.py", line 87, in <lambda>
return lambda *a: f(*a)
File "/databricks/spark/python/pyspark/util.py", line 99, in wrapper
return f(*args, **kwargs)
File "<command-256375544159470>", line 3, in detectlang
File "/databricks/python/lib/python3.7/site-packages/textblob/blob.py", line 568, in detect_language
return self.translator.detect(self.raw)
File "/databricks/python/lib/python3.7/site-packages/textblob/translate.py", line 69, in detect
raise TranslatorError('Must provide a string with at least 3 characters.')
textblob.exceptions.TranslatorError: Must provide a string with at least 3 characters.
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:534)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:81)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:64)
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:488)
at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage2.processNext(Unknown Source)
at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$13$$anon$1.hasNext(WholeStageCodegenExec.scala:640)
at org.apache.spark.sql.execution.collect.UnsafeRowBatchUtils$.encodeUnsafeRows(UnsafeRowBatchUtils.scala:62)
at org.apache.spark.sql.execution.collect.Collector$$anonfun$2.apply(Collector.scala:159)
at org.apache.spark.sql.execution.collect.Collector$$anonfun$2.apply(Collector.scala:158)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.doRunTask(Task.scala:140)
at org.apache.spark.scheduler.Task.run(Task.scala:113)
at org.apache.spark.executor.Executor$TaskRunner$$anonfun$13.apply(Executor.scala:528)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1526)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:534)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
... 1 more
Does anybody know how to resolve this, or know of any pre-built PySpark functions that would accomplish this?
Based on the latest edit, here is the source of your task failure:
File "/databricks/python/lib/python3.7/site-packages/textblob/translate.py", line 69, in detect
raise TranslatorError('Must provide a string with at least 3 characters.')
textblob.exceptions.TranslatorError: Must provide a string with at least 3 characters.
Spark can be quite unhelpful with its error reporting, so you need to scan all the way down the trace for the underlying Python exception; the driver itself will only report something like Task Lost or Task Failed.
A quick fix is to check the len() of the input string inside your function, or to wrap the UDF call in f.when().otherwise() in PySpark; the latter is often preferable because the UDF is then not executed when it isn't needed.
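For illustration, here is a minimal sketch of both options, reusing the detectlang UDF and the Text column from the question (returning None for short strings is an assumption; pick whatever fallback suits your data):
from textblob import TextBlob
import pyspark.sql.functions as f
from pyspark.sql.functions import udf

def detectlang(string):
    # Guard inside the UDF: TextBlob needs at least 3 characters.
    if string is None or len(string) < 3:
        return None
    return TextBlob(string).detect_language()
detectlang_udf = udf(detectlang)

# Or guard outside the UDF, so it is only evaluated when actually needed:
result = dict_comments[13].withColumn(
    "lang",
    f.when(f.length(f.col("Text")) >= 3, detectlang_udf(f.col("Text"))).otherwise(f.lit(None))
)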
Hope this helps!
Related
I have the following PySpark code (Databricks on Azure):
import pycountry
from pyspark.sql.functions import udf, col, to_json, struct

# load exchange data
df_ex = spark.read.format("csv").load("xxx.csv", inferSchema=True, header=True)
# udf
get_country = udf(lambda x: pycountry.countries.get(alpha_2=x).name)
# clean exchange data
clean_df_ex = df_ex.select(["EQUITY EXCH CODE","EQUITY EXCH NAME","Composite Code","ISO COUNTRY"])\
.withColumn("COUNTRY", get_country(col("ISO COUNTRY")) )
# convert 2 columns to new json column
df_list_of_dict = clean_df_ex.withColumn("dict_value", to_json(struct(col("EQUITY EXCH CODE"), col("COUNTRY"))))
# final df, list of dicts
df_list = df_list_of_dict.select("dict_value")
So far everything works perfectly, and I can do show() or take().
For example:
If I do df_list.take(2), I get the values I expect.
My main goal is to iterate through the new DataFrame and create a list.
For example, using take() works with no issues:
mylist = [ i.dict_value for i in df_list.take(5) ]
mylist
The result:
['{"EQUITY EXCH CODE":"AJ","COUNTRY":"South Africa"}',
'{"EQUITY EXCH CODE":"PF","COUNTRY":"Australia"}',
'{"EQUITY EXCH CODE":"UP","COUNTRY":"United States"}',
'{"EQUITY EXCH CODE":"AQ","COUNTRY":"Australia"}',
'{"EQUITY EXCH CODE":"QE","COUNTRY":"France"}']
However, if I try collect() instead of take(), I get the following error.
The code:
mylist = [ i.dict_value for i in df_list.collect() ]
mylist
The error:
Py4JJavaError Traceback (most recent call last)
<command-3895085882512910> in <module>
1 # this cod is the correct way to do it but it won't work
----> 2 for i in df_list.collect():
3 print(i.dict_value)
4
/databricks/spark/python/pyspark/sql/dataframe.py in collect(self)
552 # Default path used in OSS Spark / for non-DF-ACL clusters:
553 with SCCallSiteSync(self._sc) as css:
--> 554 sock_info = self._jdf.collectToPython()
555 return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
556
/databricks/spark/python/lib/py4j-0.10.7-src.zip/py4j/java_gateway.py in __call__(self, *args)
1255 answer = self.gateway_client.send_command(command)
1256 return_value = get_return_value(
-> 1257 answer, self.gateway_client, self.target_id, self.name)
1258
1259 for temp_arg in temp_args:
/databricks/spark/python/pyspark/sql/utils.py in deco(*a, **kw)
61 def deco(*a, **kw):
62 try:
---> 63 return f(*a, **kw)
64 except py4j.protocol.Py4JJavaError as e:
65 s = e.java_exception.toString()
/databricks/spark/python/lib/py4j-0.10.7-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
326 raise Py4JJavaError(
327 "An error occurred while calling {0}{1}{2}.\n".
--> 328 format(target_id, ".", name), value)
329 else:
330 raise Py4JError(
Py4JJavaError: An error occurred while calling o14499.collectToPython.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 157.0 failed 4 times, most recent failure: Lost task 0.3 in stage 157.0 (TID 330, 10.139.64.5, executor 1): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/databricks/spark/python/pyspark/worker.py", line 480, in main
process()
File "/databricks/spark/python/pyspark/worker.py", line 472, in process
serializer.dump_stream(out_iter, outfile)
File "/databricks/spark/python/pyspark/serializers.py", line 460, in dump_stream
self.serializer.dump_stream(self._batched(iterator), stream)
File "/databricks/spark/python/pyspark/serializers.py", line 150, in dump_stream
for obj in iterator:
File "/databricks/spark/python/pyspark/serializers.py", line 449, in _batched
for item in iterator:
File "<string>", line 1, in <lambda>
File "/databricks/spark/python/pyspark/worker.py", line 87, in <lambda>
return lambda *a: f(*a)
File "/databricks/spark/python/pyspark/util.py", line 99, in wrapper
return f(*args, **kwargs)
File "<command-2765369177614916>", line 1, in <lambda>
AttributeError: 'NoneType' object has no attribute 'name'
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:540)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:81)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:64)
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:494)
at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at org.apache.spark.sql.execution.collect.UnsafeRowBatchUtils$.encodeUnsafeRows(UnsafeRowBatchUtils.scala:62)
at org.apache.spark.sql.execution.collect.Collector$$anonfun$1.apply(Collector.scala:151)
at org.apache.spark.sql.execution.collect.Collector$$anonfun$1.apply(Collector.scala:150)
at org.apache.spark.SparkContext$$anonfun$41.apply(SparkContext.scala:2377)
at org.apache.spark.SparkContext$$anonfun$41.apply(SparkContext.scala:2377)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.doRunTask(Task.scala:140)
at org.apache.spark.scheduler.Task.run(Task.scala:113)
at org.apache.spark.executor.Executor$TaskRunner$$anonfun$13.apply(Executor.scala:537)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1541)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:543)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:2362)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:2350)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:2349)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2349)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:1102)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:1102)
at scala.Option.foreach(Option.scala:257)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1102)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2582)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2529)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2517)
at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:897)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2280)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2378)
at org.apache.spark.sql.execution.collect.Collector.runSparkJobs(Collector.scala:245)
at org.apache.spark.sql.execution.collect.Collector.collect(Collector.scala:280)
at org.apache.spark.sql.execution.collect.Collector$.collect(Collector.scala:80)
at org.apache.spark.sql.execution.collect.Collector$.collect(Collector.scala:86)
at org.apache.spark.sql.execution.ResultCacheManager.getOrComputeResult(ResultCacheManager.scala:508)
at org.apache.spark.sql.execution.ResultCacheManager.getOrComputeResult(ResultCacheManager.scala:480)
at org.apache.spark.sql.execution.SparkPlan.executeCollectResult(SparkPlan.scala:328)
at org.apache.spark.sql.Dataset$$anonfun$50.apply(Dataset.scala:3367)
at org.apache.spark.sql.Dataset$$anonfun$50.apply(Dataset.scala:3366)
at org.apache.spark.sql.Dataset$$anonfun$54.apply(Dataset.scala:3501)
at org.apache.spark.sql.Dataset$$anonfun$54.apply(Dataset.scala:3496)
at org.apache.spark.sql.execution.SQLExecution$$anonfun$withCustomExecutionEnv$1$$anonfun$apply$1.apply(SQLExecution.scala:112)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:217)
at org.apache.spark.sql.execution.SQLExecution$$anonfun$withCustomExecutionEnv$1.apply(SQLExecution.scala:98)
at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:835)
at org.apache.spark.sql.execution.SQLExecution$.withCustomExecutionEnv(SQLExecution.scala:74)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:169)
at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$withAction(Dataset.scala:3496)
at org.apache.spark.sql.Dataset.collectToPython(Dataset.scala:3366)
at sun.reflect.GeneratedMethodAccessor521.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:380)
at py4j.Gateway.invoke(Gateway.java:295)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:251)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/databricks/spark/python/pyspark/worker.py", line 480, in main
process()
File "/databricks/spark/python/pyspark/worker.py", line 472, in process
serializer.dump_stream(out_iter, outfile)
File "/databricks/spark/python/pyspark/serializers.py", line 460, in dump_stream
self.serializer.dump_stream(self._batched(iterator), stream)
File "/databricks/spark/python/pyspark/serializers.py", line 150, in dump_stream
for obj in iterator:
File "/databricks/spark/python/pyspark/serializers.py", line 449, in _batched
for item in iterator:
File "<string>", line 1, in <lambda>
File "/databricks/spark/python/pyspark/worker.py", line 87, in <lambda>
return lambda *a: f(*a)
File "/databricks/spark/python/pyspark/util.py", line 99, in wrapper
return f(*args, **kwargs)
File "<command-2765369177614916>", line 1, in <lambda>
AttributeError: 'NoneType' object has no attribute 'name'
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:540)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:81)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:64)
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:494)
at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at org.apache.spark.sql.execution.collect.UnsafeRowBatchUtils$.encodeUnsafeRows(UnsafeRowBatchUtils.scala:62)
at org.apache.spark.sql.execution.collect.Collector$$anonfun$1.apply(Collector.scala:151)
at org.apache.spark.sql.execution.collect.Collector$$anonfun$1.apply(Collector.scala:150)
at org.apache.spark.SparkContext$$anonfun$41.apply(SparkContext.scala:2377)
at org.apache.spark.SparkContext$$anonfun$41.apply(SparkContext.scala:2377)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.doRunTask(Task.scala:140)
at org.apache.spark.scheduler.Task.run(Task.scala:113)
at org.apache.spark.executor.Executor$TaskRunner$$anonfun$13.apply(Executor.scala:537)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1541)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:543)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
... 1 more
Update:
I can confirm that the issue happens when I call collect() on the UDF column (COUNTRY), or include that UDF-derived column in a SQL projection.
AttributeError: 'NoneType' object has no attribute 'name'
Basically, it throws an error saying it couldn't find the attribute name in the third-party Python module I'm using (pycountry).
I can confirm the attribute (name) does exist; for example:
pycountry.countries.get(alpha_2="DE").name >> will output Germany
As a workaround:
I built a dictionary, then used it in my UDF, and it seems to work fine now.
country_dict = { i.alpha_2: i.name for i in list(pycountry.countries)}
Then use it as:
udf_get_country = udf( lambda x : country_dict.get(x, "No Country") , StringType())
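For completeness, here is a minimal sketch of how this workaround slots back into the original pipeline (column names come from the code above; "No Country" is just the fallback I chose):
import pycountry
from pyspark.sql.functions import udf, col
from pyspark.sql.types import StringType

country_dict = {i.alpha_2: i.name for i in list(pycountry.countries)}
udf_get_country = udf(lambda x: country_dict.get(x, "No Country"), StringType())

# same projection as before, but the lookup can no longer dereference a None
clean_df_ex = df_ex.select(["EQUITY EXCH CODE", "EQUITY EXCH NAME", "Composite Code", "ISO COUNTRY"]) \
    .withColumn("COUNTRY", udf_get_country(col("ISO COUNTRY")))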
I'm still curious to understand what happened.
I am trying to parse some text using spacy to get word dependencies. I am running PySpark in Anaconda with Jupyter notebooks.
Python version: 3.7.5
PySpark version: 2.4.4
Spacy version: 2.2.5
Anaconda version: 4.7.12
Jupyter version: 6.0.2
Here's an MVCE for the error:
import spacy
import en_core_web_sm
from pyspark.sql.functions import *
from pyspark.sql.types import *
def get_token_dep(text):
    if text:
        nlp = en_core_web_sm.load()
        return [(token.text, token.tag_, token.head.text, token.dep_) for token in nlp(text)]
    else:
        return [['N/A']]
get_token_dep_udf = udf(get_token_dep, ArrayType(ArrayType(StringType())))
text_list = ['Chocolate is a food made from cacao beans.', 'Dessert is a course that concludes a meal.']
text_df = spark.createDataFrame(text_list, StringType())
text_df = text_df.withColumnRenamed(
'value', 'text'
).withColumn(
'parsed_text', get_token_dep_udf('text')
)
display(text_df.toPandas())
However, I am getting errors as follows:
---------------------------------------------------------------------------
Py4JJavaError Traceback (most recent call last)
<ipython-input-14-bc4e37a4051a> in <module>
----> 1 display(text_df.toPandas())
~\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\pyspark\sql\dataframe.py in toPandas(self)
2141
2142 # Below is toPandas without Arrow optimization.
-> 2143 pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns)
2144
2145 dtype = {}
~\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\pyspark\sql\dataframe.py in collect(self)
532 """
533 with SCCallSiteSync(self._sc) as css:
--> 534 sock_info = self._jdf.collectToPython()
535 return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
536
~\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\py4j\java_gateway.py in __call__(self, *args)
1255 answer = self.gateway_client.send_command(command)
1256 return_value = get_return_value(
-> 1257 answer, self.gateway_client, self.target_id, self.name)
1258
1259 for temp_arg in temp_args:
~\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\pyspark\sql\utils.py in deco(*a, **kw)
61 def deco(*a, **kw):
62 try:
---> 63 return f(*a, **kw)
64 except py4j.protocol.Py4JJavaError as e:
65 s = e.java_exception.toString()
~\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\py4j\protocol.py in get_return_value(answer, gateway_client, target_id, name)
326 raise Py4JJavaError(
327 "An error occurred while calling {0}{1}{2}.\n".
--> 328 format(target_id, ".", name), value)
329 else:
330 raise Py4JError(
Py4JJavaError: An error occurred while calling o147.collectToPython.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 7 in stage 11.0 failed 1 times, most recent failure: Lost task 7.0 in stage 11.0 (TID 47, localhost, executor driver): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\catalogue.py", line 8, in <module>
import importlib.metadata as importlib_metadata
ModuleNotFoundError: No module named 'importlib.metadata'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py", line 366, in main
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py", line 241, in read_udfs
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py", line 168, in read_single_udf
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py", line 69, in read_command
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\serializers.py", line 172, in _read_with_length
return self.loads(obj)
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\serializers.py", line 580, in loads
return pickle.loads(obj, encoding=encoding)
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\cloudpickle.py", line 875, in subimport
__import__(name)
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\en_core_web_sm\__init__.py", line 5, in <module>
from spacy.util import load_model_from_init_py, get_model_meta
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\spacy\__init__.py", line 12, in <module>
from . import pipeline
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\spacy\pipeline\__init__.py", line 4, in <module>
from .pipes import Tagger, DependencyParser, EntityRecognizer, EntityLinker
File "pipes.pyx", line 1, in init spacy.pipeline.pipes
File "strings.pxd", line 23, in init spacy.syntax.nn_parser
File "strings.pyx", line 17, in init spacy.strings
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\spacy\util.py", line 16, in <module>
import catalogue
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\catalogue.py", line 10, in <module>
import importlib_metadata
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\importlib_metadata\__init__.py", line 547, in <module>
__version__ = version(__name__)
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\importlib_metadata\__init__.py", line 509, in version
return distribution(distribution_name).version
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\importlib_metadata\__init__.py", line 482, in distribution
return Distribution.from_name(distribution_name)
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\importlib_metadata\__init__.py", line 183, in from_name
dist = next(dists, None)
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\importlib_metadata\__init__.py", line 425, in <genexpr>
for path in map(cls._switch_path, paths)
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\importlib_metadata\__init__.py", line 449, in _search_path
if not root.is_dir():
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\pathlib.py", line 1358, in is_dir
return S_ISDIR(self.stat().st_mode)
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\pathlib.py", line 1168, in stat
return self._accessor.stat(self)
OSError: [WinError 123] The filename, directory name, or volume label syntax is incorrect: 'C:\\C:\\Users\\user1\\AppData\\Local\\Continuum\\anaconda3\\envs\\py37\\Lib\\site-packages\\pyspark\\jars\\spark-core_2.11-2.4.4.jar'
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:456)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:81)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:64)
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:410)
at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$13$$anon$1.hasNext(WholeStageCodegenExec.scala:636)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:255)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:247)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:836)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:836)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.run(Task.scala:123)
at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1889)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1877)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1876)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1876)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:926)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:926)
at scala.Option.foreach(Option.scala:257)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:926)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2110)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2059)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2048)
at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:737)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2061)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2082)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2101)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2126)
at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:945)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
at org.apache.spark.rdd.RDD.withScope(RDD.scala:363)
at org.apache.spark.rdd.RDD.collect(RDD.scala:944)
at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:299)
at org.apache.spark.sql.Dataset$$anonfun$collectToPython$1.apply(Dataset.scala:3263)
at org.apache.spark.sql.Dataset$$anonfun$collectToPython$1.apply(Dataset.scala:3260)
at org.apache.spark.sql.Dataset$$anonfun$52.apply(Dataset.scala:3370)
at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:78)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:125)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:73)
at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3369)
at org.apache.spark.sql.Dataset.collectToPython(Dataset.scala:3260)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:238)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\catalogue.py", line 8, in <module>
import importlib.metadata as importlib_metadata
ModuleNotFoundError: No module named 'importlib.metadata'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py", line 366, in main
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py", line 241, in read_udfs
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py", line 168, in read_single_udf
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py", line 69, in read_command
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\serializers.py", line 172, in _read_with_length
return self.loads(obj)
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\serializers.py", line 580, in loads
return pickle.loads(obj, encoding=encoding)
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\cloudpickle.py", line 875, in subimport
__import__(name)
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\en_core_web_sm\__init__.py", line 5, in <module>
from spacy.util import load_model_from_init_py, get_model_meta
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\spacy\__init__.py", line 12, in <module>
from . import pipeline
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\spacy\pipeline\__init__.py", line 4, in <module>
from .pipes import Tagger, DependencyParser, EntityRecognizer, EntityLinker
File "pipes.pyx", line 1, in init spacy.pipeline.pipes
File "strings.pxd", line 23, in init spacy.syntax.nn_parser
File "strings.pyx", line 17, in init spacy.strings
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\spacy\util.py", line 16, in <module>
import catalogue
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\catalogue.py", line 10, in <module>
import importlib_metadata
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\importlib_metadata\__init__.py", line 547, in <module>
__version__ = version(__name__)
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\importlib_metadata\__init__.py", line 509, in version
return distribution(distribution_name).version
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\importlib_metadata\__init__.py", line 482, in distribution
return Distribution.from_name(distribution_name)
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\importlib_metadata\__init__.py", line 183, in from_name
dist = next(dists, None)
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\importlib_metadata\__init__.py", line 425, in <genexpr>
for path in map(cls._switch_path, paths)
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\site-packages\importlib_metadata\__init__.py", line 449, in _search_path
if not root.is_dir():
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\pathlib.py", line 1358, in is_dir
return S_ISDIR(self.stat().st_mode)
File "C:\Users\user1\AppData\Local\Continuum\anaconda3\envs\py37\lib\pathlib.py", line 1168, in stat
return self._accessor.stat(self)
OSError: [WinError 123] The filename, directory name, or volume label syntax is incorrect: 'C:\\C:\\Users\\user1\\AppData\\Local\\Continuum\\anaconda3\\envs\\py37\\Lib\\site-packages\\pyspark\\jars\\spark-core_2.11-2.4.4.jar'
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:456)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:81)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:64)
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:410)
at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$13$$anon$1.hasNext(WholeStageCodegenExec.scala:636)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:255)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:247)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:836)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:836)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.run(Task.scala:123)
at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
... 1 more
I have tried upgrading Python to 3.8, but Jupyter notebooks don't support the newer Python version yet. Has anyone been able to get spaCy to work with PySpark in Jupyter notebooks?
Part of the error points to https://github.com/explosion/catalogue/blob/master/catalogue.py#L7, where the import of importlib.metadata seems to go wrong, but not with the expected error type ImportError. I'll make a PR to include ModuleNotFoundError, and let's hope that fixes the issue!
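For reference, the import fallback at that line, reconstructed here from the traceback above (a sketch, not copied verbatim from the file), looks like this:
try:
    import importlib.metadata as importlib_metadata  # stdlib module, Python 3.8+
except ImportError:
    import importlib_metadata  # backport package on older Python versions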
[EDIT:] Hm, ModuleNotFoundError is a subclass of ImportError so I don't understand why that is not properly caught in the except block :|
[EDIT 2:] Logged an issue https://github.com/explosion/catalogue/issues/4 in case this is indeed related to catalogue.py
I am trying to validate 300,000 email IDs using the validate_email package and write the result to a CSV in Azure Databricks, and I am getting a timeout error.
Py4JJavaError Traceback (most recent call last)
<command-365284720716518> in <module>()
----> 1 latest_dup_df.write.format("com.databricks.spark.csv").option("header", "true").save("dbfs:/FileStore/MailResult/latest_dup_df_all")
/databricks/spark/python/pyspark/sql/readwriter.py in save(self, path, format, mode, partitionBy, **options)
736 self._jwrite.save()
737 else:
--> 738 self._jwrite.save(path)
739
740 #since(1.4)
/databricks/spark/python/lib/py4j-0.10.7-src.zip/py4j/java_gateway.py in __call__(self, *args)
1255 answer = self.gateway_client.send_command(command)
1256 return_value = get_return_value(
-> 1257 answer, self.gateway_client, self.target_id, self.name)
1258
1259 for temp_arg in temp_args:
/databricks/spark/python/pyspark/sql/utils.py in deco(*a, **kw)
61 def deco(*a, **kw):
62 try:
---> 63 return f(*a, **kw)
64 except py4j.protocol.Py4JJavaError as e:
65 s = e.java_exception.toString()
/databricks/spark/python/lib/py4j-0.10.7-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
326 raise Py4JJavaError(
327 "An error occurred while calling {0}{1}{2}.\n".
--> 328 format(target_id, ".", name), value)
329 else:
330 raise Py4JError(
Py4JJavaError: An error occurred while calling o548.save.
: org.apache.spark.SparkException: Job aborted.
at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:198)
at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand.run(InsertIntoHadoopFsRelationCommand.scala:192)
at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult$lzycompute(commands.scala:110)
at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult(commands.scala:108)
at org.apache.spark.sql.execution.command.DataWritingCommandExec.doExecute(commands.scala:128)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:146)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:134)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$5.apply(SparkPlan.scala:187)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:183)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:134)
at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:114)
at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:114)
at org.apache.spark.sql.DataFrameWriter$$anonfun$runCommand$1.apply(DataFrameWriter.scala:710)
at org.apache.spark.sql.DataFrameWriter$$anonfun$runCommand$1.apply(DataFrameWriter.scala:710)
at org.apache.spark.sql.execution.SQLExecution$$anonfun$withCustomExecutionEnv$1.apply(SQLExecution.scala:111)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:240)
at org.apache.spark.sql.execution.SQLExecution$.withCustomExecutionEnv(SQLExecution.scala:97)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:170)
at org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:710)
at org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:306)
at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:292)
at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:235)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:380)
at py4j.Gateway.invoke(Gateway.java:295)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:251)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 2 in stage 4.0 failed 4 times, most recent failure: Lost task 2.3 in stage 4.0 (TID 16, 10.139.64.8, executor 1): org.apache.spark.SparkException: Task failed while writing rows.
at org.apache.spark.sql.execution.datasources.FileFormatWriter$.org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask(FileFormatWriter.scala:257)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$write$1.apply(FileFormatWriter.scala:170)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$write$1.apply(FileFormatWriter.scala:169)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.doRunTask(Task.scala:139)
at org.apache.spark.scheduler.Task.run(Task.scala:112)
at org.apache.spark.executor.Executor$TaskRunner$$anonfun$13.apply(Executor.scala:497)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1526)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:503)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/databricks/spark/python/pyspark/worker.py", line 480, in main
process()
File "/databricks/spark/python/pyspark/worker.py", line 472, in process
serializer.dump_stream(out_iter, outfile)
File "/databricks/spark/python/pyspark/serializers.py", line 456, in dump_stream
self.serializer.dump_stream(self._batched(iterator), stream)
File "/databricks/spark/python/pyspark/serializers.py", line 149, in dump_stream
for obj in iterator:
File "/databricks/spark/python/pyspark/serializers.py", line 445, in _batched
for item in iterator:
File "<string>", line 1, in <lambda>
File "/databricks/spark/python/pyspark/worker.py", line 87, in <lambda>
return lambda *a: f(*a)
File "/databricks/spark/python/pyspark/util.py", line 99, in wrapper
return f(*args, **kwargs)
File "<command-3884158641112366>", line 6, in <lambda>
File "/databricks/python/lib/python3.5/site-packages/validate_email.py", line 134, in validate_email
mx_hosts = get_mx_ip(hostname)
File "/databricks/python/lib/python3.5/site-packages/validate_email.py", line 102, in get_mx_ip
MX_DNS_CACHE[hostname] = DNS.mxlookup(hostname)
File "/databricks/python/lib/python3.5/site-packages/DNS/lazy.py", line 56, in mxlookup
l = dnslookup(name, qtype, timeout)
File "/databricks/python/lib/python3.5/site-packages/DNS/lazy.py", line 38, in dnslookup
result = Base.DnsRequest(name=name, qtype=qtype).req(timeout=timeout)
File "/databricks/python/lib/python3.5/site-packages/DNS/Base.py", line 324, in req
self.sendUDPRequest(server)
File "/databricks/python/lib/python3.5/site-packages/DNS/Base.py", line 377, in sendUDPRequest
raise first_socket_error
File "/databricks/python/lib/python3.5/site-packages/DNS/Base.py", line 352, in sendUDPRequest
r=self.processUDPReply()
File "/databricks/python/lib/python3.5/site-packages/DNS/Base.py", line 135, in processUDPReply
raise TimeoutError('Timeout')
DNS.Base.TimeoutError: Timeout
I am using Azure Databricks with Python 3 and the py3dns package. I am very new to Spark and Azure Databricks. I also tried setting DNS.defaults['server'] = ['8.8.8.8', '8.8.4.4'], but I am still unable to resolve this. Below is the code I tried. Is there an efficient way to validate 300,000 (3 lakh) email IDs? It takes 7-8 hours, then the job gets aborted and I get the timeout error. I also tried with Python 2; same issue.
import DNS
DNS.defaults['server']=['8.8.8.8', '8.8.4.4']
from email_validator import validate_email, EmailNotValidError
from validate_email import validate_email
from pyspark.sql.types import BooleanType
from pyspark.sql.functions import udf
validate_mail_udf = udf(lambda x : validate_email(x,verify=True), BooleanType())
upd_df = upd_df.withColumn('is_mail_valid', validate_mail_udf(('mail_id')))
upd_df.write.format("com.databricks.spark.csv").option("header", "true").save("dbfs:/FileStore/MailResult/")
The expected result is a new column named 'is_mail_valid' holding boolean values indicating whether each of the 300,000 email addresses really exists, and then writing the result DataFrame to a CSV in Azure Databricks.
If you are using Spark 2.3 or above, you can use vectorized UDFs, which use PyArrow. Please see the link below for more details.
https://databricks.com/blog/2017/10/30/introducing-vectorized-udfs-for-pyspark.html
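As a rough illustration, a scalar pandas UDF for the same check might look like the sketch below; it assumes Spark 2.3+ with PyArrow installed, and keeps the questioner's validate_email call (here with verify=False, see the next answer) applied per row within each Arrow batch:
import pandas as pd
from validate_email import validate_email
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import BooleanType

@pandas_udf(BooleanType(), PandasUDFType.SCALAR)
def validate_mail_pudf(mail_ids):
    # mail_ids is a pandas Series; return a Series of booleans
    return mail_ids.apply(lambda m: validate_email(m, verify=False))

upd_df = upd_df.withColumn("is_mail_valid", validate_mail_pudf("mail_id"))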
validate_email(x, verify=True) checks with the SMTP server whether the host exists. So you are essentially spamming the servers with 300,000 requests, which is likely to get you blocked at some point and prevent further validation.
If your goal is just to make sure those emails are well-formed, you can run this package with verify=False, which simply runs every email against a regex. This operation on 300k records should be really quick, no more than a few minutes.
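A minimal sketch of that change, based on the UDF from the question (only verify=True becomes verify=False):
from validate_email import validate_email
from pyspark.sql.functions import udf, col
from pyspark.sql.types import BooleanType

validate_mail_udf = udf(lambda x: validate_email(x, verify=False), BooleanType())
upd_df = upd_df.withColumn('is_mail_valid', validate_mail_udf(col('mail_id')))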
If you want to verify whether an email actually exists, there is no good way to do this in bulk, as email servers should be doing their best to prevent you from doing exactly that :) validate_email does take a timeout parameter, so you can try increasing it from the default, but overall I'd advise strongly against this approach.
Trying to check if I have NaN values in some columns with:
ddf_temp = ddf.select('col1', 'col2' ...) # all int type
ddf_temp.select([count(when(isnull(c), c)).alias(c) for c in ddf_temp.columns]).show()
I could isolate which columns give me this error, but I cannot figure out why I get it:
---------------------------------------------------------------------------
Py4JJavaError Traceback (most recent call last)
<ipython-input-47-76c75cf06695> in <module>()
3 # ddf_temp = ddf10.select('state_bottle_cost')
4 ddf_temp = ddf10.where(col('state_bottle_retail').isNull())
----> 5 ddf_temp.show()
6 # ddf_temp = ddf10.select('store_number', 'zip_code', 'county_number', 'category', 'vendor_number', 'pack', 'bottles_sold')
7 # ddf_temp.select([count(when(isnull(c), c)).alias(c) for c in ddf_temp.columns]).show()
3 frames
/content/spark-2.4.3-bin-hadoop2.7/python/lib/py4j-0.10.7-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
326 raise Py4JJavaError(
327 "An error occurred while calling {0}{1}{2}.\n".
--> 328 format(target_id, ".", name), value)
329 else:
330 raise Py4JError(
Py4JJavaError: An error occurred while calling o2010.showString.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 43.0 failed 1 times, most recent failure: Lost task 0.0 in stage 43.0 (TID 233, localhost, executor driver): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/content/spark-2.4.3-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/worker.py", line 377, in main
process()
File "/content/spark-2.4.3-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/worker.py", line 372, in process
serializer.dump_stream(func(split_index, iterator), outfile)
File "/content/spark-2.4.3-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/serializers.py", line 345, in dump_stream
self.serializer.dump_stream(self._batched(iterator), stream)
File "/content/spark-2.4.3-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/serializers.py", line 141, in dump_stream
for obj in iterator:
File "/content/spark-2.4.3-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/serializers.py", line 334, in _batched
for item in iterator:
File "<string>", line 1, in <lambda>
File "/content/spark-2.4.3-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/worker.py", line 85, in <lambda>
return lambda *a: f(*a)
File "/content/spark-2.4.3-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/util.py", line 99, in wrapper
return f(*args, **kwargs)
File "<ipython-input-11-9ec9e286520d>", line 3, in <lambda>
TypeError: 'NoneType' object is not subscriptable
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:452)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:81)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:64)
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:406)
at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$GroupedIterator.fill(Iterator.scala:1124)
at scala.collection.Iterator$GroupedIterator.hasNext(Iterator.scala:1130)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$class.foreach(Iterator.scala:891)
at scala.collection.AbstractIterator.foreach(Iterator.scala:1334)
at org.apache.spark.api.python.PythonRDD$.writeIteratorToStream(PythonRDD.scala:224)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$2.writeIteratorToStream(PythonUDFRunner.scala:50)
at org.apache.spark.api.python.BasePythonRunner$WriterThread$$anonfun$run$1.apply(PythonRunner.scala:345)
at org.apache.spark.util.Utils$.logUncaughtExceptions(Utils.scala:1945)
at org.apache.spark.api.python.BasePythonRunner$WriterThread.run(PythonRunner.scala:194)
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1889)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1877)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1876)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1876)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:926)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:926)
at scala.Option.foreach(Option.scala:257)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:926)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2110)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2059)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2048)
at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:737)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2061)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2082)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2101)
at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:365)
at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:38)
at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collectFromPlan(Dataset.scala:3383)
at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2544)
at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2544)
at org.apache.spark.sql.Dataset$$anonfun$53.apply(Dataset.scala:3364)
at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:78)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:125)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:73)
at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3363)
at org.apache.spark.sql.Dataset.head(Dataset.scala:2544)
at org.apache.spark.sql.Dataset.take(Dataset.scala:2758)
at org.apache.spark.sql.Dataset.getRows(Dataset.scala:254)
at org.apache.spark.sql.Dataset.showString(Dataset.scala:291)
at sun.reflect.GeneratedMethodAccessor122.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:238)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/content/spark-2.4.3-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/worker.py", line 377, in main
process()
File "/content/spark-2.4.3-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/worker.py", line 372, in process
serializer.dump_stream(func(split_index, iterator), outfile)
File "/content/spark-2.4.3-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/serializers.py", line 345, in dump_stream
self.serializer.dump_stream(self._batched(iterator), stream)
File "/content/spark-2.4.3-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/serializers.py", line 141, in dump_stream
for obj in iterator:
File "/content/spark-2.4.3-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/serializers.py", line 334, in _batched
for item in iterator:
File "<string>", line 1, in <lambda>
File "/content/spark-2.4.3-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/worker.py", line 85, in <lambda>
return lambda *a: f(*a)
File "/content/spark-2.4.3-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/util.py", line 99, in wrapper
return f(*args, **kwargs)
File "<ipython-input-11-9ec9e286520d>", line 3, in <lambda>
TypeError: 'NoneType' object is not subscriptable
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:452)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:81)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:64)
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:406)
at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$GroupedIterator.fill(Iterator.scala:1124)
at scala.collection.Iterator$GroupedIterator.hasNext(Iterator.scala:1130)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$class.foreach(Iterator.scala:891)
at scala.collection.AbstractIterator.foreach(Iterator.scala:1334)
at org.apache.spark.api.python.PythonRDD$.writeIteratorToStream(PythonRDD.scala:224)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$2.writeIteratorToStream(PythonUDFRunner.scala:50)
at org.apache.spark.api.python.BasePythonRunner$WriterThread$$anonfun$run$1.apply(PythonRunner.scala:345)
at org.apache.spark.util.Utils$.logUncaughtExceptions(Utils.scala:1945)
at org.apache.spark.api.python.BasePythonRunner$WriterThread.run(PythonRunner.scala:194)
Edit (the UDFs applied to the column):
remove_first_char = udf(lambda x: x[1:])
ddf4 = ddf3.withColumn('State Bottle Cost', remove_first_char('State Bottle Cost'))
multiply_by_100 = udf(lambda x: x*100)
ddf5 = ddf4.withColumn('State Bottle Cost', ddf4['State Bottle Cost'].cast(DoubleType()))
ddf5 = ddf5.withColumn('State Bottle Cost', multiply_by_100('State Bottle Cost'))
ddf5 = ddf5.withColumn('State Bottle Cost', ddf5['State Bottle Cost'].cast(IntegerType()))
You have Nones (nulls) in your DataFrame, and applying the UDF executes None[1:], which raises TypeError: 'NoneType' object is not subscriptable (you can reproduce this in a plain Python shell).
Built-in PySpark functions always map null -> null. If you still want to do it via a UDF (not recommended, since Spark applies internal optimizations to its built-in SQL functions), you need to handle the None case yourself, e.g. lambda x: x if not x else x[1:]
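Here is a minimal sketch of both options, assuming the column holds strings like '$1.23' with a leading character to strip (column names taken from your edit):
from pyspark.sql import functions as F
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType

# Preferred: built-in functions propagate null -> null, so no special handling is needed
ddf4 = ddf3.withColumn('State Bottle Cost',
                       F.expr("substring(`State Bottle Cost`, 2, length(`State Bottle Cost`))"))
ddf5 = ddf4.withColumn('State Bottle Cost',
                       (F.col('State Bottle Cost').cast('double') * 100).cast(IntegerType()))

# If you insist on a UDF, guard against None explicitly
remove_first_char = udf(lambda x: x[1:] if x is not None else None)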
I downloaded a text file from this site: http://snap.stanford.edu/data/web-Amazon-links.html with the intent of doing some text analytics in PySpark.
So I set up my Spark context:
from pyspark import SparkConf, SparkContext
conf = SparkConf().setAppName('app')
sc = SparkContext(conf=conf)
from pyspark.sql import SQLContext
I grabbed the file:
Data1 = sc.textFile('/home/john/Downloads/Software.txt.gz').map(lambda line: line.split(','))
The data looks like this:
[['product/productId: B000068VBQ'],
['product/title: Fisher-Price Rescue Heroes: Lava Landslide'],
['product/price: 8.88'],
['review/userId: unknown'],
['review/profileName: unknown'],
['review/helpfulness: 11/11'],
['review/score: 2.0'],
['review/time: 1042070400'],
['review/summary: Requires too much coordination'],
['review/text: I bought this software for my 5 year old. He has a couple of the other RH software games and he likes them a lot. This game',
' however'
But then when I try groupByKey:
sorted(Data1.groupByKey().mapValues(list).collect())
And I get this error:
Py4JJavaError Traceback (most recent call last)
<ipython-input-15-a3c92709547a> in <module>
----> 1 sorted(Data1.groupByKey().mapValues(list).collect())
~/anaconda3/lib/python3.7/site-packages/pyspark/rdd.py in collect(self)
814 """
815 with SCCallSiteSync(self.context) as css:
--> 816 sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
817 return list(_load_from_socket(sock_info, self._jrdd_deserializer))
818
~/anaconda3/lib/python3.7/site-packages/py4j/java_gateway.py in __call__(self, *args)
1255 answer = self.gateway_client.send_command(command)
1256 return_value = get_return_value(
-> 1257 answer, self.gateway_client, self.target_id, self.name)
1258
1259 for temp_arg in temp_args:
~/anaconda3/lib/python3.7/site-packages/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
326 raise Py4JJavaError(
327 "An error occurred while calling {0}{1}{2}.\n".
--> 328 format(target_id, ".", name), value)
329 else:
330 raise Py4JError(
Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.collectAndServe.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 5.0 failed 1 times, most recent failure: Lost task 0.0 in stage 5.0 (TID 4, localhost, executor driver): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/home/john/anaconda3/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/worker.py", line 372, in main
process()
File "/home/john/anaconda3/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/worker.py", line 367, in process
serializer.dump_stream(func(split_index, iterator), outfile)
File "/home/john/anaconda3/lib/python3.7/site-packages/pyspark/rdd.py", line 2499, in pipeline_func
return func(split, prev_func(split, iterator))
File "/home/john/anaconda3/lib/python3.7/site-packages/pyspark/rdd.py", line 2499, in pipeline_func
return func(split, prev_func(split, iterator))
File "/home/john/anaconda3/lib/python3.7/site-packages/pyspark/rdd.py", line 352, in func
return f(iterator)
File "/home/john/anaconda3/lib/python3.7/site-packages/pyspark/rdd.py", line 1945, in combine
merger.mergeValues(iterator)
File "/home/john/anaconda3/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/shuffle.py", line 238, in mergeValues
for k, v in iterator:
ValueError: not enough values to unpack (expected 2, got 1)
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:452)
at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRunner.scala:588)
at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRunner.scala:571)
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:406)
at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
at scala.collection.Iterator$GroupedIterator.fill(Iterator.scala:1124)
at scala.collection.Iterator$GroupedIterator.hasNext(Iterator.scala:1130)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:125)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:55)
at org.apache.spark.scheduler.Task.run(Task.scala:121)
at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:402)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:408)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1887)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1875)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1874)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1874)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:926)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:926)
at scala.Option.foreach(Option.scala:257)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:926)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2108)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2057)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2046)
at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:737)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2061)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2082)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2101)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2126)
at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:945)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
at org.apache.spark.rdd.RDD.withScope(RDD.scala:363)
at org.apache.spark.rdd.RDD.collect(RDD.scala:944)
at org.apache.spark.api.python.PythonRDD$.collectAndServe(PythonRDD.scala:166)
at org.apache.spark.api.python.PythonRDD.collectAndServe(PythonRDD.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:238)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/home/john/anaconda3/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/worker.py", line 372, in main
process()
File "/home/john/anaconda3/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/worker.py", line 367, in process
serializer.dump_stream(func(split_index, iterator), outfile)
File "/home/john/anaconda3/lib/python3.7/site-packages/pyspark/rdd.py", line 2499, in pipeline_func
return func(split, prev_func(split, iterator))
File "/home/john/anaconda3/lib/python3.7/site-packages/pyspark/rdd.py", line 2499, in pipeline_func
return func(split, prev_func(split, iterator))
File "/home/john/anaconda3/lib/python3.7/site-packages/pyspark/rdd.py", line 352, in func
return f(iterator)
File "/home/john/anaconda3/lib/python3.7/site-packages/pyspark/rdd.py", line 1945, in combine
merger.mergeValues(iterator)
File "/home/john/anaconda3/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/shuffle.py", line 238, in mergeValues
for k, v in iterator:
ValueError: not enough values to unpack (expected 2, got 1)
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:452)
at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRunner.scala:588)
at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRunner.scala:571)
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:406)
at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
at scala.collection.Iterator$GroupedIterator.fill(Iterator.scala:1124)
at scala.collection.Iterator$GroupedIterator.hasNext(Iterator.scala:1130)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:125)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:55)
at org.apache.spark.scheduler.Task.run(Task.scala:121)
at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:402)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:408)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
... 1 more
The problem is in your data and in the map you used.
groupByKey expects an RDD of (key, value) pairs: it groups by the key and then lets you aggregate the values.
But your RDD does not contain key --> value pairs, only single-element lists, and that is why you get the error: each one-element list is the "got 1" in "expected 2, got 1".
I do not know exactly what your data is or what you want to achieve, but you could do something like this, for example:
# Split each record into (key, value) on the first ':' only, so values that contain ':' stay intact;
# the filter drops lines without a ':' at all.
Data1 = sc.textFile('/home/john/Downloads/Software.txt.gz').flatMap(lambda line: line.split("', '"))
Data2 = Data1.map(lambda line: line.split(':', 1)).filter(lambda x: len(x) == 2)
sorted(Data2.groupByKey().mapValues(set).collect())
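A quick sanity check of the pair shape before the expensive groupByKey can also help (the output below is only illustrative, based on the sample shown in your question):
Data2.take(3)
# e.g. [['product/productId', ' B000068VBQ'],
#       ['product/title', ' Fisher-Price Rescue Heroes: Lava Landslide'],
#       ['product/price', ' 8.88']]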