// NOTE(review): the original file header was destroyed by extraction (three
// lines of unrelated garbled text followed by a truncated "client;"). The
// package below is restored from the consumer.kafka.* imports and the upstream
// kafka-spark-consumer example layout — confirm against the original source.
package consumer.kafka.client;
import java.io.Serializable;
import java.util.Iterator;
import java.util.Properties;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import consumer.kafka.MessageAndMetadata;
import consumer.kafka.ProcessedOffsetManager;
import consumer.kafka.ReceiverLauncher;
@SuppressWarnings("serial")
public class SampleConsumer implements Serializable {
@SuppressWarnings("deprecation")
// Drives the Kafka -> Spark Streaming consumer loop: tracks per-partition
// offsets, prints every received key/value pair, then starts the streaming
// context and blocks until termination.
//
// NOTE(review): this method references variables (unionStreams, props, rdd,
// jsc) that are never declared in the visible portion of the file. The lines
// that build the SparkConf/JavaStreamingContext, launch the receiver via
// ReceiverLauncher, and open the foreachRDD(...) callback that binds `rdd`
// appear to have been lost in extraction — as a result the brace structure
// below (L44-L45) closes scopes that are never opened here. Restore from the
// upstream kafka-spark-consumer SampleConsumer example before compiling.
private void run() {
//Get the Max offset from each RDD Partitions. Each RDD Partition belongs to
// One Kafka Partition
JavaPairDStream<Integer, Iterable<Long>> partitonOffset =
ProcessedOffsetManager
.getPartitionOffset(unionStreams, props);
// Walk every partition of the incoming RDD and print each message.
// NOTE(review): the enclosing foreachRDD(...) call that supplies `rdd` is
// missing from this chunk.
rdd.foreachPartition(new VoidFunction<Iterator<MessageAndMetadata<byte[]>>>()
{
@Override
public void call(Iterator<MessageAndMetadata<byte[]>> mmItr)
throws Exception {
while(mmItr.hasNext()) {
MessageAndMetadata<byte[]> mm = mmItr.next();
byte[] key = mm.getKey();
byte[] value = mm.getPayload();
// Either field may be null for a given Kafka record, hence the guards.
// NOTE(review): new String(byte[]) decodes with the platform default
// charset; prefer new String(bytes, StandardCharsets.UTF_8) once the
// file is restored to a compilable state.
if(key != null)
System.out.println(" key :" + new String(key));
if(value != null)
System.out.println(" Value :" + new
String(value));
}
});
}
});
//End Application Logic
// Start streaming and block forever; on any failure cancel outstanding jobs,
// stop the context — stop(true, false) also stops the underlying SparkContext,
// non-gracefully — and exit with a non-zero status.
try {
jsc.start();
jsc.awaitTermination();
}catch (Exception ex ) {
jsc.ssc().sc().cancelAllJobs();
jsc.stop(true, false);
System.exit(-1);
}
}