How can we get access to SQLContext in a flatmap function? - apache-spark

SQLContext, when accessed through a singleton class as shown below, works fine in local mode; however, when the job is submitted to the Spark master it becomes null and throws a NullPointerException. How can this be fixed?
In our use case, the FlatMapFunction is expected to query another DStream, and the returned results are used to create a new stream.
I have extended the JavaStatefulNetworkWordCount example to print the changes to the state. I need to access the RDDs of a stateful DStream from another DStream using the SQLContext in order to create yet another DStream. How can this be achieved?
import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;
import org.apache.spark.HashPartitioner;
import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.StorageLevels;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SQLContext;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import scala.Tuple2;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
public class JavaStatefulNetworkWordCount {
private static final Pattern SPACE = Pattern.compile(" ");
public static void main(String[] args) {
if (args.length < 2) {
System.err.println("Usage: JavaStatefulNetworkWordCount <hostname> <port>");
System.exit(1);
}
// Update the cumulative count function
final Function2<List<Integer>, Optional<Integer>, Optional<Integer>> updateFunction =
new Function2<List<Integer>, Optional<Integer>, Optional<Integer>>() {
@Override
public Optional<Integer> call(List<Integer> values, Optional<Integer> state) {
Integer newSum = state.or(0);
for (Integer value : values) {
newSum += value;
}
return Optional.of(newSum);
}
};
// Create the context with a 1 second batch size
SparkConf sparkConf = new SparkConf().setAppName("JavaStatefulNetworkWordCount");
// sparkConf.setMaster("local[5]");
// sparkConf.set("spark.executor.uri", "target/rkspark-0.0.1-SNAPSHOT.jar");
JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, Durations.seconds(1));
ssc.checkpoint(".");
SQLContext sqlContext = JavaSQLContextSingleton.getInstance(ssc.sparkContext().sc());
// Initial RDD input to updateStateByKey
List<Tuple2<String, Integer>> tuples = Arrays.asList(new Tuple2<String, Integer>("hello", 1),
new Tuple2<String, Integer>("world", 1));
JavaPairRDD<String, Integer> initialRDD = ssc.sc().parallelizePairs(tuples);
JavaReceiverInputDStream<String> lines = ssc.socketTextStream(
args[0], Integer.parseInt(args[1]), StorageLevels.MEMORY_AND_DISK_SER_2);
JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
@Override
public Iterable<String> call(String x) {
return Lists.newArrayList(SPACE.split(x));
}
});
JavaPairDStream<String, Integer> wordsDstream = words.mapToPair(
new PairFunction<String, String, Integer>() {
@Override
public Tuple2<String, Integer> call(String s) {
return new Tuple2<String, Integer>(s, 1);
}
});
// This will give a Dstream made of state (which is the cumulative count of the words)
JavaPairDStream<String, Integer> stateDstream = wordsDstream.updateStateByKey(updateFunction,
new HashPartitioner(ssc.sparkContext().defaultParallelism()), initialRDD);
JavaDStream<WordCount> countStream = stateDstream.map(new Function<Tuple2<String, Integer>, WordCount>(){
@Override
public WordCount call(Tuple2<String, Integer> v1) throws Exception {
return new WordCount(v1._1,v1._2);
}});
countStream.foreachRDD(new Function<JavaRDD<WordCount>,Void>() {
@Override
public Void call(JavaRDD<WordCount> rdd) {
SQLContext sqlContext = JavaSQLContextSingleton.getInstance(rdd.context());
DataFrame wordsDataFrame = sqlContext.createDataFrame(rdd, WordCount.class);
wordsDataFrame.registerTempTable("words");
return null;
}
});
wordsDstream.map(new Function<Tuple2<String,Integer>,String>(){
@Override
public String call(Tuple2<String, Integer> v1) throws Exception {
// Below sql context becomes null when run on a master instead of local.
SQLContext sqlContext = JavaSQLContextSingleton.getInstance();
DataFrame counterpartyIds = sqlContext.sql("select * from words where word ='"+v1._1()+"'");
Row[] rows = counterpartyIds.cache().collect();
if(rows.length>0){
Row row = rows[0];
return row.getInt(0)+"-"+ row.getString(1);
} else {
return "";
}
}
}).print();
ssc.start();
ssc.awaitTermination();
}
}
class JavaSQLContextSingleton {
static private transient SQLContext instance = null;
static public SQLContext getInstance(SparkContext sparkContext) {
if (instance == null) {
instance = new SQLContext(sparkContext);
}
return instance;
}
// No-argument overload used in the map function above; on an executor the static
// field has never been initialized, so this returns null there.
static public SQLContext getInstance() {
return instance;
}
}
import java.io.Serializable;
public class WordCount implements Serializable {
String word;
int count;
public WordCount(String word, int count) {
this.word = word;
this.count = count;
}
public String getWord() {
return word;
}
public void setWord(String word) {
this.word = word;
}
public int getCount() {
return count;
}
public void setCount(int count) {
this.count = count;
}
}

The SparkContext (and therefore the SQLContext) is only available on the driver and is not serialized to the workers. Your program works in local mode because it runs entirely in the driver JVM, where the context is available.
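A minimal sketch of how to work around this, assuming the singleton and the "words" temp table from the code above: do the query inside a foreachRDD call, whose function body runs on the driver (where the SQLContext can be obtained from rdd.context()), rather than inside map, which runs on the executors. Since output operations run in the order they are registered, the table registered by the countStream block is already in place for the same batch.
wordsDstream.foreachRDD(new Function<JavaPairRDD<String, Integer>, Void>() {
    @Override
    public Void call(JavaPairRDD<String, Integer> rdd) {
        // rdd.context() returns the driver-side SparkContext
        SQLContext sqlContext = JavaSQLContextSingleton.getInstance(rdd.context());
        DataFrame matches = sqlContext.sql("select * from words");
        for (Row row : matches.collect()) {
            // same column layout as in the original map function: (count, word)
            System.out.println(row.getInt(0) + "-" + row.getString(1));
        }
        return null;
    }
});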

Related

Filter JavaRDD based on an ArrayList of index IDs

I have a Dataset df whose rows have an accountid column, and I also have an ArrayList of accountids. How do I filter or map the Dataset to create a new Dataset that contains only the rows whose accountid is in the ArrayList?
I am using Java 8.
List<String> accountIdList= new ArrayList<String>();
accountIdList.add("1001");
accountIdList.add("1002");
accountIdList.add("1003");
accountIdList.add("1004");
Dataset<Row> filteredRows= df.filter(p-> df.col("accountId").equals(accountIdList));
I am trying to pass the list itself to the comparison operator; do you think this is the correct approach?
If you are looking for the Java syntax:
Dataset<Row> filteredRows= df.where(df.col("accountId").isin(accountIdList.toArray()));
Use the Column.isin method:
import scala.collection.JavaConversions;
import static org.apache.spark.sql.functions.*;
Dataset<Row> filteredRows = df.where(col("accountId").isin(
JavaConversions.asScalaIterator(accountIdList.iterator()).toSeq()
));
Here is working code in Java. Hope it helps.
This is my sample file content (input):
1001
1008
1005
1009
1010
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.spark.api.java.function.FilterFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.SparkSession;
public class DatasetFilter {
private static List<String> sampleList = new ArrayList<String>();
public static void main(String[] args)
{
sampleList.add("1001");
sampleList.add("1002");
sampleList.add("1003");
sampleList.add("1004");
sampleList.add("1005");
SparkSession sparkSession = SparkSession.builder()
.config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
.config("spark.sql.warehouse.dir", "file:///C:/Users/user/workspace/Validation/spark-warehouse")
.master("local[*]").getOrCreate();
//Read the source-file.
Dataset<String> src = sparkSession.read().textFile("C:\\Users\\user\\Desktop\\dataSetFilterTest.txt");
src.show(10);
//Apply filter
Dataset<String> filteredSource = src.filter(new FilterFunction<String>() {
private static final long serialVersionUID = 1L;
@Override
public boolean call(String value) throws Exception {
System.out.println("***************************************");
boolean status = false;
Iterator<String> iterator = sampleList.iterator();
while (iterator.hasNext()) {
String val = iterator.next();
System.out.println("Val is :: " + val + " Value is :: " + value);
if (value.equalsIgnoreCase(val)) {
status = true;
break;
}
}
return status;
}
});
filteredSource.show();
System.out.println("Completed the job :)");
}
}
Output:-
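A small hedged variation (an assumption, not part of the original answer): the static sampleList above is populated only in the driver's main method, so on a real cluster (rather than local[*]) the executors would see an empty list. Wrapping the IDs in a broadcast variable avoids that.
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.broadcast.Broadcast;

// Broadcast the ID list once from the driver so every executor reads the same copy.
final Broadcast<List<String>> broadcastIds =
        new JavaSparkContext(sparkSession.sparkContext()).broadcast(sampleList);

Dataset<String> filteredSource = src.filter(
        (FilterFunction<String>) value -> broadcastIds.value().contains(value));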

Spark Parallelism with wholeTextFiles

I am trying to use the wholeTextFiles API for file processing. I have a lot of .gz files in a folder and want to read them with the wholeTextFiles API.
I have 4 executors, each with 1 core and 2 GB of RAM.
Only 2 executors are processing the job and the processing is really slow; the other two executors sit idle.
How do I spread the job across the other 2 executors to increase parallelism?
package com.sss.ss.ss.WholeText;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Iterator;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SQLContext;
import org.apache.spark.sql.hive.HiveContext;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import scala.Tuple2;
public class WholeText {
public static class mySchema implements Serializable {
private String CFIELD1 ;
private String CFIELD2 ;
public String getCFIELD1()
{
return CFIELD1;
}
public void setCFIELD1(String cFIELD1)
{
CFIELD1 = cFIELD1;
}
public String getCFIELD2()
{
return CFIELD2;
}
public void setCFIELD2(String cFIELD2)
{
CFIELD2 = cFIELD2;
}
}
public static void main(String[] args) throws InterruptedException {
SparkConf sparkConf = new SparkConf().setAppName("My app")
.setMaster("mymaster..")
.set("spark.driver.allowMultipleContexts", "true");
JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, Durations.seconds(15));
JavaPairRDD<String, String> wholeTextFiles = jssc.sparkContext().wholeTextFiles(args[0],Integer.parseInt(args[3]));
Integer ll = wholeTextFiles.getNumPartitions();
System.out.println("Number of Partitions"+ll);
JavaRDD<String> stringRDD = wholeTextFiles.
map(
new Function<Tuple2<String, String>, String>() {
private static final long serialVersionUID = -551872585218963131L;
public String call(Tuple2<String, String> v1) throws Exception
{
return v1._2;
}
}
).
flatMap
(new FlatMapFunction<String, String>()
{
public Iterator<String> call(String t) throws Exception
{
return Arrays.asList(t.split("\\r?\\n")).iterator();
}
}).
filter(new Function<String, Boolean>() {
private static final long serialVersionUID = 1L;
public Boolean call(String t) throws Exception {
int colons = 0;
String s = t;
if(s == null || s.trim().length() < 1) {
return false;
}
for(int i = 0; i < s.length(); i++) {
if(s.charAt(i) == ';') colons++;
}
System.out.println("colons="+colons);
if ((colons <=3)){
return false;
}
return true;
}
});
JavaRDD<mySchema> schemaRDD = stringRDD.map(new Function<String, mySchema>()
{
private static final long serialVersionUID = 1L;
public mySchema call(String line) throws Exception
{
String[] parts = line.split(";",-1);
mySchema mySchema = new mySchema();
mySchema.setCFIELD1 (parts[0]);
mySchema.setCFIELD2 (parts[1]);
return mySchema;
}
});
SQLContext hc = new HiveContext(jssc.sparkContext());
Dataset<Row> df = hc.createDataFrame(schemaRDD, mySchema.class);
df.createOrReplaceTempView("myView");
hc.sql("INSERT INTO -----
"from myView");
hc.sql("INSERT INTO .......
"from myView");
}
}
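A minimal sketch of one way to spread the load (a hedged suggestion, not taken from an answer in this thread): with wholeTextFiles each file arrives as a single (filename, content) record, and the initial partitioning may place all of them on a few partitions, so repartitioning right after the read lets the downstream flatMap/filter/map stages run on all executors.
// Illustrative partition count: 4, matching the 4 single-core executors.
JavaPairRDD<String, String> wholeTextFiles = jssc.sparkContext()
        .wholeTextFiles(args[0], Integer.parseInt(args[3]))
        .repartition(4);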

spark pipeline KMeansModel clusterCenters

I'm using a pipeline to cluster text documents. The last stage in the pipeline is ml.clustering.KMeans, which gives me a DataFrame with a column of cluster predictions. I would like to add the cluster centers as a column as well. I understand I can execute Vector[] clusterCenters = kmeansModel.clusterCenters(); and then convert the results into a DataFrame and join them to the other DataFrame; however, I was hoping to find a way to accomplish this similar to the KMeans code below:
KMeans kMeans = new KMeans()
.setFeaturesCol("pca")
.setPredictionCol("kmeansclusterprediction")
.setK(5)
.setInitMode("random")
.setSeed(43L)
.setInitSteps(3)
.setMaxIter(15);
pipeline.setStages( ...
I was able to extend KMeans and call the fit method via a pipeline; however, I'm not having any luck extending KMeansModel. The constructor requires a String uid and a KMeansModel, but I don't know how to pass in the model when defining the stages and calling the setStages method.
I also looked into extending KMeans.scala; however, as a Java developer I only understand about half the code, so I'm hoping someone has an easier solution before I tackle that. Ultimately I would like to end up with a DataFrame as follows:
+--------------------+-----------------------+--------------------+
| docid|kmeansclusterprediction|kmeansclustercenters|
+--------------------+-----------------------+--------------------+
|2bcbcd54-c11a-48c...| 2| [-0.04, -7.72]|
|0e644620-f5ff-40f...| 3| [0.23, 1.08]|
|665c1c2b-3065-4e8...| 3| [0.23, 1.08]|
|598c6268-e4b9-4c9...| 0| [-15.81, 0.01]|
+--------------------+-----------------------+--------------------+
Any help or hints are greatly appreciated.
Thank you.
Answering my own question ... this was actually easy ... I extended KMeans and KMeansModel ... the extended KMeans fit method must return the extended KMeansModel. For example:
public class AnalyticsKMeansModel extends KMeansModel ...
public class AnalyticsKMeans extends org.apache.spark.ml.clustering.KMeans { ...
public AnalyticsKMeansModel fit(DataFrame dataset) {
JavaRDD<Vector> javaRDD = dataset.select(this.getFeaturesCol()).toJavaRDD().map(new Function<Row, Vector>(){
private static final long serialVersionUID = -4588981547209486909L;
@Override
public Vector call(Row row) throws Exception {
Object point = row.getAs("pca");
Vector vector = (Vector)point;
return vector;
}
});
RDD<Vector> rdd = JavaRDD.toRDD(javaRDD);
org.apache.spark.mllib.clustering.KMeans algo = new org.apache.spark.mllib.clustering.KMeans()
.setK(BoxesRunTime.unboxToInt(this.$((Param<?>)this.k())))
.setInitializationMode((String)this.$(this.initMode()))
.setInitializationSteps(BoxesRunTime.unboxToInt((Object)this.$((Param<?>)this.initSteps())))
.setMaxIterations(BoxesRunTime.unboxToInt((Object)this.$((Param<?>)this.maxIter())))
.setSeed(BoxesRunTime.unboxToLong((Object)this.$((Param<?>)this.seed())))
.setEpsilon(BoxesRunTime.unboxToDouble((Object)this.$((Param<?>)this.tol())));
org.apache.spark.mllib.clustering.KMeansModel parentModel = algo.run(rdd);
AnalyticsKMeansModel model = new AnalyticsKMeansModel(this.uid(), parentModel);
return (AnalyticsKMeansModel) this.copyValues((Params)model, this.copyValues$default$2());
}
Once I changed the fit method to return my extended KMeansModel class everything worked as expected.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.ml.clustering.KMeansModel;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import AnalyticsCluster;
public class AnalyticsKMeansModel extends KMeansModel {
private static final long serialVersionUID = -8893355418042946358L;
public AnalyticsKMeansModel(String uid, org.apache.spark.mllib.clustering.KMeansModel parentModel) {
super(uid, parentModel);
}
public DataFrame transform(DataFrame dataset) {
Vector[] clusterCenters = super.clusterCenters();
List<AnalyticsCluster> analyticsClusters = new ArrayList<AnalyticsCluster>();
for (int i=0; i<clusterCenters.length;i++){
Integer clusterId = super.predict(clusterCenters[i]);
Vector vector = clusterCenters[i];
double[] point = vector.toArray();
AnalyticsCluster analyticsCluster = new AnalyticsCluster(clusterId, point, 0L);
analyticsClusters.add(analyticsCluster);
}
JavaSparkContext jsc = JavaSparkContext.fromSparkContext(dataset.sqlContext().sparkContext());
JavaRDD<AnalyticsCluster> javaRDD = jsc.parallelize(analyticsClusters);
JavaRDD<Row> javaRDDRow = javaRDD.map(new Function<AnalyticsCluster, Row>() {
private static final long serialVersionUID = -2677295862916670965L;
@Override
public Row call(AnalyticsCluster cluster) throws Exception {
Row row = RowFactory.create(
String.valueOf(cluster.getID()),
String.valueOf(Arrays.toString(cluster.getCenter()))
);
return row;
}
});
List<StructField> schemaColumns = new ArrayList<StructField>();
schemaColumns.add(DataTypes.createStructField(this.getPredictionCol(), DataTypes.StringType, false));
schemaColumns.add(DataTypes.createStructField("clusterpoint", DataTypes.StringType, false));
StructType dataFrameSchema = DataTypes.createStructType(schemaColumns);
DataFrame clusterPointsDF = dataset.sqlContext().createDataFrame(javaRDDRow, dataFrameSchema);
//SOMETIMES "K" IS SET TO A VALUE GREATER THAN THE NUMBER OF ACTUAL ROWS OF DATA ... GET DISTINCT VALUES
clusterPointsDF.registerTempTable("clusterPoints");
DataFrame clustersDF = clusterPointsDF.sqlContext().sql("select distinct " + this.getPredictionCol()+ ", clusterpoint from clusterPoints");
clustersDF.cache();
clusterPointsDF.sqlContext().dropTempTable("clusterPoints");
DataFrame transformedDF = super.transform(dataset);
transformedDF.cache();
DataFrame df = transformedDF.join(clustersDF,
transformedDF.col(this.getPredictionCol()).equalTo(clustersDF.col(this.getPredictionCol())), "inner")
.drop(clustersDF.col(this.getPredictionCol()));
return df;
}
}
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.ml.param.Param;
import org.apache.spark.ml.param.Params;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.rdd.RDD;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.Row;
import scala.runtime.BoxesRunTime;
public class AnalyticsKMeans extends org.apache.spark.ml.clustering.KMeans {
private static final long serialVersionUID = 8943702485821267996L;
private static String uid = null;
public AnalyticsKMeans(String uid){
AnalyticsKMeans.uid= uid;
}
public AnalyticsKMeansModel fit(DataFrame dataset) {
JavaRDD<Vector> javaRDD = dataset.select(this.getFeaturesCol()).toJavaRDD().map(new Function<Row, Vector>(){
private static final long serialVersionUID = -4588981547209486909L;
@Override
public Vector call(Row row) throws Exception {
Object point = row.getAs("pca");
Vector vector = (Vector)point;
return vector;
}
});
RDD<Vector> rdd = JavaRDD.toRDD(javaRDD);
org.apache.spark.mllib.clustering.KMeans algo = new org.apache.spark.mllib.clustering.KMeans()
.setK(BoxesRunTime.unboxToInt(this.$((Param<?>)this.k())))
.setInitializationMode((String)this.$(this.initMode()))
.setInitializationSteps(BoxesRunTime.unboxToInt((Object)this.$((Param<?>)this.initSteps())))
.setMaxIterations(BoxesRunTime.unboxToInt((Object)this.$((Param<?>)this.maxIter())))
.setSeed(BoxesRunTime.unboxToLong((Object)this.$((Param<?>)this.seed())))
.setEpsilon(BoxesRunTime.unboxToDouble((Object)this.$((Param<?>)this.tol())));
org.apache.spark.mllib.clustering.KMeansModel parentModel = algo.run(rdd);
AnalyticsKMeansModel model = new AnalyticsKMeansModel(this.uid(), parentModel);
return (AnalyticsKMeansModel) this.copyValues((Params)model, this.copyValues$default$2());
}
}
import java.io.Serializable;
import java.util.Arrays;
public class AnalyticsCluster implements Serializable {
private static final long serialVersionUID = 6535671221958712594L;
private final int id;
private volatile double[] center;
private volatile long count;
public AnalyticsCluster(int id, double[] center, long initialCount) {
// Preconditions.checkArgument(center.length > 0);
// Preconditions.checkArgument(initialCount >= 1);
this.id = id;
this.center = center;
this.count = initialCount;
}
public int getID() {
return id;
}
public double[] getCenter() {
return center;
}
public long getCount() {
return count;
}
public synchronized void update(double[] newPoint, long newCount) {
int length = center.length;
// Preconditions.checkArgument(length == newPoint.length);
double[] newCenter = new double[length];
long newTotalCount = newCount + count;
double newToTotal = (double) newCount / newTotalCount;
for (int i = 0; i < length; i++) {
double centerI = center[i];
newCenter[i] = centerI + newToTotal * (newPoint[i] - centerI);
}
center = newCenter;
count = newTotalCount;
}
@Override
public synchronized String toString() {
return id + " " + Arrays.toString(center) + " " + count;
}
// public static void main(String[] args) {
// double[] point = new double[2];
// point[0] = 0.10150532938119154;
// point[1] = -0.23734759238651829;
//
// Cluster cluster = new Cluster(1,point, 10L);
// System.out.println("cluster: " + cluster.toString());
// }
}
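A hedged usage sketch (the column names and parameter values come from the question; the Pipeline wiring itself is my assumption, not code from the answer above): the extended estimator drops into setStages just like the stock KMeans, and its fit returns the extended model whose transform adds the cluster-center column via the join shown earlier.
import org.apache.spark.ml.Pipeline;
import org.apache.spark.ml.PipelineStage;

AnalyticsKMeans kMeans = new AnalyticsKMeans("analyticsKMeans");
kMeans.setFeaturesCol("pca");
kMeans.setPredictionCol("kmeansclusterprediction");
kMeans.setK(5);
kMeans.setSeed(43L);

Pipeline pipeline = new Pipeline().setStages(new PipelineStage[] { kMeans });
// pipeline.fit(trainingDF) now calls AnalyticsKMeans.fit(...), which returns an
// AnalyticsKMeansModel, so the fitted pipeline's transform adds the "clusterpoint" column.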

Instead of using the defined Spark Streaming window, it fetches the complete set of messages from Kafka in one go

Kafka is already filled with 10 million (1 crore) messages. When I execute this code, I expect it to report the count of tuples/messages processed in each 2-second window, but it returns the complete message count, i.e. 10 million, the first time, and then 0, 0, 0, ... afterwards, even though it takes more than 2 seconds to print that count.
The inline source code is:
import java.io.Serializable;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import kafka.serializer.StringDecoder;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaPairInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import scala.Tuple2;
public class Test implements Serializable {
private static final long serialVersionUID = -5863692754478476225L;
private static final String KEY_SPARK_MASTER = "spark://machine1-1467:7077";
private static final String KEY_APP_NAME = "SQLWordCount";
private static final String KEY_TOPIC = "add104";
private static JavaStreamingContext streamingContext = null;
private Test() {
disableLogging();
doInit();
process();
}
public static void main(String[] params) {
System.out.println("------------Started---------------" + new Date().toString());
new Test();
}
private void disableLogging() {
Logger.getLogger("org").setLevel(Level.OFF);
Logger.getLogger("akka").setLevel(Level.OFF);
}
private void doInit() {
SparkConf sparkConf = new SparkConf().setMaster(KEY_SPARK_MASTER).setAppName(KEY_APP_NAME);
streamingContext = new JavaStreamingContext(sparkConf, new Duration(500));
streamingContext.checkpoint("/home/impadmin/checkpoint");
}
private HashMap<String, String> getKafkaParams() {
HashMap<String, String> kafkaParams = new HashMap<String, String>();
kafkaParams.put("metadata.broker.list", "localhost:9092");
kafkaParams.put("auto.offset.reset", "smallest");
kafkaParams.put("group.id", "id7");
return kafkaParams;
}
private HashSet<String> getTopicSet() {
HashSet<String> topic = new HashSet<String>(Arrays.asList(KEY_TOPIC));
return topic;
}
private void process() {
try {
JavaPairInputDStream<String, String> messages = KafkaUtils
.createDirectStream(streamingContext, String.class,
String.class, StringDecoder.class,
StringDecoder.class, getKafkaParams(),
getTopicSet());
JavaPairDStream<String, String> windowedStream = messages.window(
new Duration(2000), new Duration(2000));
JavaDStream<String> lines = windowedStream
.map(new Function<Tuple2<String, String>, String>() {
public String call(Tuple2<String, String> tuple2) {
return tuple2._2();
}
});
lines.foreachRDD(new Function<JavaRDD<String>, Void>() {
public Void call(JavaRDD<String> rdd) throws Exception {
System.out.println(new Date().toString() + " In the Call method" + rdd.count());
JavaRDD<Stock> rowRDD = rdd
.map(new Function<String, Stock>() {
@Override
public Stock call(String arg0) throws Exception {
return null;
}
});
return null;
};
});
streamingContext.start();
streamingContext.awaitTermination();
} catch (Exception e) {
System.out.println("Exception: " + e.toString());
}
}
}
Because you are using kafkaParams.put("auto.offset.reset", "smallest");, it goes back and brings in all the messages.
Change it to kafkaParams.put("auto.offset.reset", "largest"); to consume only new messages.
If your expectation is that the streaming context will chunk up all the existing messages into 2-second batches, I doubt it will do that. However, you can set an offset range and read all the existing data in multiple batches.
New messages, on the other hand, will be batched every 2 seconds or whatever interval you set.
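A minimal sketch of the batching idea (assuming your Spark version supports the direct-stream rate limit spark.streaming.kafka.maxRatePerPartition): capping the per-partition rate drains the existing backlog over many small batches instead of one huge first batch.
SparkConf sparkConf = new SparkConf()
        .setMaster(KEY_SPARK_MASTER)
        .setAppName(KEY_APP_NAME)
        // records per second per Kafka partition; with a 500 ms batch interval this caps
        // each batch at roughly 0.5 * 50000 records per partition (illustrative value)
        .set("spark.streaming.kafka.maxRatePerPartition", "50000");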

Not Serializable exception when integrating SQL and Spark Streaming

In addition to Not Serializable exception when integrating Spark SQL and Spark Streaming:
My source code:
public static void main(String args[]) {
SparkConf sparkConf = new SparkConf().setAppName("NumberCount");
JavaSparkContext jc = new JavaSparkContext(sparkConf);
JavaStreamingContext jssc = new JavaStreamingContext(jc, new Duration(2000));
jssc.addStreamingListener(new WorkCountMonitor());
int numThreads = Integer.parseInt(args[3]);
Map<String,Integer> topicMap = new HashMap<String,Integer>();
String[] topics = args[2].split(",");
for (String topic : topics) {
topicMap.put(topic, numThreads);
}
JavaPairReceiverInputDStream<String,String> data = KafkaUtils.createStream(jssc, args[0], args[1], topicMap);
data.print();
JavaDStream<Person> streamData = data.map(new Function<Tuple2<String, String>, Person>() {
public Person call(Tuple2<String,String> v1) throws Exception {
String[] stringArray = v1._2.split(",");
Person Person = new Person();
Person.setName(stringArray[0]);
Person.setAge(stringArray[1]);
return Person;
}
});
final JavaSQLContext sqlContext = new JavaSQLContext(jc);
streamData.foreachRDD(new Function<JavaRDD<Person>,Void>() {
public Void call(JavaRDD<Person> rdd) {
JavaSchemaRDD subscriberSchema = sqlContext.applySchema(rdd, Person.class);
subscriberSchema.registerAsTable("people");
System.out.println("all data");
JavaSchemaRDD names = sqlContext.sql("SELECT name FROM people");
System.out.println("afterwards");
List<String> males = new ArrayList<String>();
males = names.map(new Function<Row,String>() {
public String call(Row row) {
return row.getString(0);
}
}).collect();
System.out.println("before for");
for (String name : males) {
System.out.println(name);
}
return null;
}
});
jssc.start();
jssc.awaitTermination();
}
The JavaSQLContext is declared outside the foreachRDD loop, but I am still getting a NotSerializableException:
14/12/23 23:49:38 ERROR JobScheduler: Error running job streaming job 1419378578000 ms.1
org.apache.spark.SparkException: Task not serializable
at org.apache.spark.util.ClosureCleaner$.ensureSerializable(ClosureCleaner.scala:166)
at org.apache.spark.util.ClosureCleaner$.clean(ClosureCleaner.scala:158)
at org.apache.spark.SparkContext.clean(SparkContext.scala:1435)
at org.apache.spark.rdd.RDD.map(RDD.scala:271)
at org.apache.spark.api.java.JavaRDDLike$class.map(JavaRDDLike.scala:78)
at org.apache.spark.sql.api.java.JavaSchemaRDD.map(JavaSchemaRDD.scala:42)
at com.basic.spark.NumberCount$2.call(NumberCount.java:79)
at com.basic.spark.NumberCount$2.call(NumberCount.java:67)
at org.apache.spark.streaming.api.java.JavaDStreamLike$$anonfun$foreachRDD$1.apply(JavaDStreamLike.scala:274)
at org.apache.spark.streaming.api.java.JavaDStreamLike$$anonfun$foreachRDD$1.apply(JavaDStreamLike.scala:274)
at org.apache.spark.streaming.dstream.DStream$$anonfun$foreachRDD$1.apply(DStream.scala:529)
at org.apache.spark.streaming.dstream.DStream$$anonfun$foreachRDD$1.apply(DStream.scala:529)
at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply$mcV$sp(ForEachDStream.scala:42)
at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply(ForEachDStream.scala:40)
at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply(ForEachDStream.scala:40)
at scala.util.Try$.apply(Try.scala:161)
at org.apache.spark.streaming.scheduler.Job.run(Job.scala:32)
at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler.run(JobScheduler.scala:171)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:724)
Caused by: java.io.NotSerializableException: org.apache.spark.sql.api.java.JavaSQLContext
at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1181)
at java.io.ObjectOutputStream.defaultWriteFields(ObjectOutputStream.java:1541)
at java.io.ObjectOutputStream.writeSerialData(ObjectOutputStream.java:1506)
at java.io.ObjectOutputStream.writeOrdinaryObject(ObjectOutputStream.java:1429)
at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1175)
at java.io.ObjectOutputStream.defaultWriteFields(ObjectOutputStream.java:1541)
at java.io.ObjectOutputStream.writeSerialData(ObjectOutputStream.java:1506)
at java.io.ObjectOutputStream.writeOrdinaryObject(ObjectOutputStream.java:1429)
at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1175)
at java.io.ObjectOutputStream.defaultWriteFields(ObjectOutputStream.java:1541)
at java.io.ObjectOutputStream.writeSerialData(ObjectOutputStream.java:1506)
at java.io.ObjectOutputStream.writeOrdinaryObject(ObjectOutputStream.java:1429)
at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1175)
at java.io.ObjectOutputStream.writeObject(ObjectOutputStream.java:347)
at org.apache.spark.serializer.JavaSerializationStream.writeObject(JavaSerializer.scala:42)
at org.apache.spark.serializer.JavaSerializerInstance.serialize(JavaSerializer.scala:73)
at org.apache.spark.util.ClosureCleaner$.ensureSerializable(ClosureCleaner.scala:164)
... 20 more
I would appreciate any suggestions.
Have you implemented the Serializable interface in the Person POJO class? Also, can you try declaring topicMap as final?
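A minimal sketch of that suggestion (the field names are assumed from the question's map function; this is not the original poster's class):
import java.io.Serializable;

public class Person implements Serializable {
    private String name;
    private String age;

    public String getName() { return name; }
    public void setName(String name) { this.name = name; }

    public String getAge() { return age; }
    public void setAge(String age) { this.age = age; }
}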
Here is the working code:
package com.basic.spark;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.api.java.JavaSQLContext;
import org.apache.spark.sql.api.java.JavaSchemaRDD;
import org.apache.spark.sql.api.java.Row;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import scala.Tuple2;
public class NumberCount implements Serializable {
transient SparkConf sparkConf = new SparkConf().setAppName("NumberCount");
transient JavaSparkContext jc = new JavaSparkContext(sparkConf);
transient JavaStreamingContext jssc_1 = new JavaStreamingContext(jc, new Duration(1000));
transient JavaSQLContext sqlContext = new JavaSQLContext(jc);
transient Producer producer = configureKafka();
public static void main(String args[]) {
(new NumberCount()).job_1(args);
}
public void job_1(String...args) {
jssc_1.addStreamingListener(new WorkCountMonitor());
int numThreads = Integer.parseInt(args[3]);
Map<String,Integer> topicMap = new HashMap<String,Integer>();
String[] topics = args[2].split(",");
for (String topic : topics) {
topicMap.put(topic, numThreads);
}
JavaPairReceiverInputDStream<String,String> data = KafkaUtils.createStream(jssc_1, args[0], args[1], topicMap);
data.window(new Duration(10000), new Duration(2000));
JavaDStream<String> streamData = data.map(new Function<Tuple2<String, String>, String>() {
public String call(Tuple2<String,String> v1) {
return v1._2;
}
});
streamData.foreachRDD(new Function<JavaRDD<String>,Void>() {
public Void call(JavaRDD<String> rdd) {
if (rdd.count() < 1)
return null;
try {
JavaSchemaRDD eventSchema = sqlContext.jsonRDD(rdd);
eventSchema.registerTempTable("event");
System.out.println("all data");
JavaSchemaRDD names = sqlContext.sql("SELECT deviceId, count(*) FROM event group by deviceId");
System.out.println("afterwards");
// List<Long> males = new ArrayList<Long>();
//
// males = names.map(new Function<Row,Long>() {
// public Long call(Row row) {
// return row.getLong(0);
// }
// }).collect();
// System.out.println("before for");
// ArrayList<KeyedMessage<String, String>> data = new ArrayList<KeyedMessage<String, String>>();
// for (Long name : males) {
// System.out.println("**************"+name);
// writeToKafka_1(data, String.valueOf(name));
// }
// producer.send(data);
List<String> deviceDetails = new ArrayList<String>();
deviceDetails = names.map(new Function<Row,String>() {
public String call(Row row) {
return row.getString(0) +":" + row.getLong(1);
}
}).collect();
System.out.println("before for");
ArrayList<KeyedMessage<String, String>> data = new ArrayList<KeyedMessage<String, String>>();
for (String name : deviceDetails) {
System.out.println("**************"+name);
writeToKafka_1(data, name);
}
producer.send(data);
} catch (Exception e) {
System.out.println("#ERROR_1# #" + rdd);
e.printStackTrace();
}
return null;
}
});
jssc_1.start();
jssc_1.awaitTermination();
}
public Producer<String, String> configureKafka() {
Properties props = new Properties();
props.put("metadata.broker.list", "xx.xx.xx.xx:9092");
props.put("serializer.class", "kafka.serializer.StringEncoder");
props.put("compression.codec", "2");
props.put("request.required.acks", "0");
props.put("producer.type", "sync");
ProducerConfig config = new ProducerConfig(props);
Producer<String, String> producer = new Producer<String, String>(config);
return producer;
}
public void writeToKafka_1(ArrayList<KeyedMessage<String,String>> list, String msg) {
list.add(new KeyedMessage<String,String>("my-replicated-topic-1", "", msg));
}
}
