I was digging into the possibility of using WebSphere MQ as a data source for Spark Streaming because it is needed in one of our use cases.
I learned that MQTT is the protocol that supports communication from MQ data structures, but since I am new to Spark Streaming I need some working examples.
Has anyone tried to connect MQ with Spark Streaming? Please suggest the best way of doing so.
So, I am posting here the working code for CustomMQReceiver, which connects to WebSphere MQ and reads data:
import java.util.Enumeration;
import javax.jms.JMSException;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.receiver.Receiver;
import com.google.gson.Gson;
import com.ibm.jms.JMSMessage;
import com.ibm.mq.jms.JMSC;
import com.ibm.mq.jms.MQQueue;
import com.ibm.mq.jms.MQQueueBrowser;
import com.ibm.mq.jms.MQQueueConnection;
import com.ibm.mq.jms.MQQueueConnectionFactory;
import com.ibm.mq.jms.MQQueueSession;
public class CustomMQReceiver extends Receiver<String> {
String host = null;
int port = -1;
String qm=null;
String qn=null;
String channel=null;
transient Gson gson=new Gson();
transient MQQueueConnection qCon= null;
Enumeration enumeration =null;
public CustomMQReceiver(String host, int port, String qm, String channel, String qn) {
super(StorageLevel.MEMORY_ONLY_2());
this.host = host;
this.port = port;
this.qm=qm;
this.qn=qn;
this.channel=channel;
}
public void onStart() {
// Start the thread that receives data over a connection
new Thread() {
@Override public void run() {
try {
initConnection();
receive();
}
catch (JMSException ex)
{
ex.printStackTrace();
}
}
}.start();
}
public void onStop() {
// There is nothing much to do as the thread calling receive()
// is designed to stop by itself once isStopped() returns true
}
/** Create a MQ connection and receive data until receiver is stopped */
private void receive() {
System.out.print("Started receiving messages from MQ");
try {
JMSMessage receivedMessage= null;
while (!isStopped() && enumeration.hasMoreElements() )
{
receivedMessage= (JMSMessage) enumeration.nextElement();
String userInput = convertStreamToString(receivedMessage);
//System.out.println("Received data :'" + userInput + "'");
store(userInput);
}
// Restart in an attempt to connect again when server is active again
//restart("Trying to connect again");
stop("No More Messages To read !");
qCon.close();
System.out.println("Queue Connection is Closed");
}
catch(Exception e)
{
e.printStackTrace();
restart("Trying to connect again");
}
catch(Throwable t) {
// restart if there is any other error
restart("Error receiving data", t);
}
}
public void initConnection() throws JMSException
{
MQQueueConnectionFactory conFactory= new MQQueueConnectionFactory();
conFactory.setHostName(host);
conFactory.setPort(port);
conFactory.setTransportType(JMSC.MQJMS_TP_CLIENT_MQ_TCPIP);
conFactory.setQueueManager(qm);
conFactory.setChannel(channel);
qCon= (MQQueueConnection) conFactory.createQueueConnection();
MQQueueSession qSession=(MQQueueSession) qCon.createQueueSession(false, 1);
MQQueue queue=(MQQueue) qSession.createQueue(qn);
MQQueueBrowser browser = (MQQueueBrowser) qSession.createBrowser(queue);
qCon.start();
enumeration= browser.getEnumeration();
}
@Override
public StorageLevel storageLevel() {
return StorageLevel.MEMORY_ONLY_2();
}
}
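To wire this receiver into a streaming job, a driver along the following lines should work; the host, port, queue manager, channel and queue names below are placeholders, so substitute your own values:
import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
public class MQStreamingJob {
public static void main(String[] args) throws InterruptedException {
SparkConf conf = new SparkConf().setAppName("WebsphereMQStream");
JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(10));
// Plug the custom receiver into the streaming context
// (connection details here are placeholder values)
JavaReceiverInputDStream<String> messages =
ssc.receiverStream(new CustomMQReceiver("mqHost", 1414, "QM1", "SYSTEM.DEF.SVRCONN", "QUEUE1"));
messages.print();
ssc.start();
ssc.awaitTermination();
}
}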
I believe you can use JMS to connect to WebSphere MQ, and Apache Camel can be used to make that connection. You can create a custom Receiver like so (note that this pattern could also be used without JMS):
class JMSReceiver(topicName: String, cf: String, jndiProviderURL: String)
extends Receiver[String](StorageLevel.MEMORY_AND_DISK_SER) with Serializable {
//Transient as this will get passed to the Workers from the Driver
@transient
var camelContextOption: Option[DefaultCamelContext] = None
def onStart() = {
camelContextOption = Some(new DefaultCamelContext())
val camelContext = camelContextOption.get
val env = new Properties()
env.setProperty("java.naming.factory.initial", "???")
env.setProperty("java.naming.provider.url", jndiProviderURL)
env.setProperty("com.webmethods.jms.clientIDSharing", "true")
val namingContext = new InitialContext(env); //using the properties file to create context
//Lookup Connection Factory
val connectionFactory = namingContext.lookup(cf).asInstanceOf[javax.jms.ConnectionFactory]
camelContext.addComponent("jms", JmsComponent.jmsComponentAutoAcknowledge(connectionFactory))
val builder = new RouteBuilder() {
def configure() = {
from(s"jms://topic:$topicName?jmsMessageType=Object&clientId=$clientId&durableSubscriptionName=${topicName}_SparkDurable&maxConcurrentConsumers=10")
.process(new Processor() {
def process(exchange: Exchange) = {
exchange.getIn.getBody match {
case s: String => store(s)
}
}
})
}
}
camelContext.addRoutes(builder)
camelContext.start()
}
def onStop() = if(camelContextOption.isDefined) camelContextOption.get.stop()
}
You can then create a DStream of your events like so:
val myDStream = ssc.receiverStream(new JMSReceiver("MyTopic", "MyContextFactory", "MyJNDI"))
I can issue a server lease to a single client as follows:
@Slf4j
public class LeaseServer {
private static final String SERVER_TAG = "server";
public static void main(String[] args) throws InterruptedException {
// Queue for incoming messages represented as Flux
// Imagine that every fireAndForget that is pushed is processed by a worker
int queueCapacity = 50;
BlockingQueue<String> messagesQueue = new ArrayBlockingQueue<>(queueCapacity);
// emulating a worker that process data from the queue
Thread workerThread =
new Thread(
() -> {
try {
while (!Thread.currentThread().isInterrupted()) {
String message = messagesQueue.take();
System.out.println("consume message:" + message);
Thread.sleep(100000); // emulating processing
}
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
});
workerThread.start();
CloseableChannel server = getFireAndForgetServer(messagesQueue, workerThread);
TimeUnit.MINUTES.sleep(10);
server.dispose();
}
private static CloseableChannel getFireAndForgetServer(BlockingQueue<String> messagesQueue, Thread workerThread) {
CloseableChannel server =
RSocketServer.create((setup, sendingSocket) ->
Mono.just(new RSocket() {
@Override
public Mono<Void> fireAndForget(Payload payload) {
// add element. if overflows errors and terminates execution
// specifically to show that lease can limit rate of fnf requests in
// that example
try {
if (!messagesQueue.offer(payload.getDataUtf8())) {
System.out.println("Queue has been overflowed. Terminating execution");
sendingSocket.dispose();
workerThread.interrupt();
}
} finally {
payload.release();
}
return Mono.empty();
}
}))
.lease(() -> Leases.create().sender(new LeaseCalculator(SERVER_TAG, messagesQueue)))
.bindNow(TcpServerTransport.create("localhost", 7000));
return server;
}
}
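The LeaseCalculator passed to Leases.create().sender(...) above is not shown, so here is a rough sketch of what such a lease sender might look like. It assumes the io.rsocket.lease API from rsocket-java (a sender is a Function returning a Flux of leases, and Lease.create(timeToLiveMillis, numberOfRequests) builds one); treat it as an illustration rather than a drop-in class:
import java.time.Duration;
import java.util.Optional;
import java.util.concurrent.BlockingQueue;
import java.util.function.Function;
import io.rsocket.lease.Lease;
import io.rsocket.lease.LeaseStats;
import reactor.core.publisher.Flux;
// Sketch of a lease sender: every few seconds it issues a lease allowing as many
// fire-and-forget requests as the queue currently has spare capacity for.
class LeaseCalculator implements Function<Optional<LeaseStats>, Flux<Lease>> {
final String tag;
final BlockingQueue<String> queue;
LeaseCalculator(String tag, BlockingQueue<String> queue) {
this.tag = tag;
this.queue = queue;
}
@Override
public Flux<Lease> apply(Optional<LeaseStats> stats) {
Duration ttl = Duration.ofSeconds(5);
return Flux.interval(Duration.ZERO, ttl)
.handle((tick, sink) -> {
int allowedRequests = queue.remainingCapacity();
if (allowedRequests > 0) {
System.out.println(tag + " issuing lease for " + allowedRequests + " requests");
sink.next(Lease.create((int) ttl.toMillis(), allowedRequests));
}
});
}
}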
But how do I issue a lease to multiple clients connected to that server?
Otherwise my queue will be written to by multiple clients at once, overflowing the service.
I can't find the details in the public documents and materials.
Your help would be very much appreciated.
Listener
@JmsListener(destination = "${servicebus.entities.acsTopicToListen.entityName}", containerFactory = "topicJmsListenerContainerFactory", subscription = "${servicebus.entities.acsTopicToListen.subscriptionName}")
public void run(byte[] message, Session session) throws Exception {
try {
acsDataHandler.messageProcessor(new String(message));
} catch (Exception ex) {
LOGGER.error("Exception thrown while listening to acsDataTopic...." + ex.getMessage());
exceptionHelper.handleTransformError(INTERNAL_SERVER_ERROR, "Error from AcsDataReceiver listen()",
ACS0001.name(), ex);
}
}
Configuration
@Bean
public ConnectionFactory schedulerConnectionFactory(ServicebusConnectionProperties serviceBusJMSProperties) {
final String connectionString = serviceBusJMSProperties.getConnectionString();
final String clientId = serviceBusJMSProperties.getTopiClientId();
final int idleTimeout = serviceBusJMSProperties.getIdleTimeout();
final ServiceBusKey serviceBusKey = ConnectionStringResolver.getServiceBusKey(connectionString);
final String host = serviceBusKey.getHost();
final String sasKeyName = serviceBusKey.getSharedAccessKeyName();
final String sasKey = serviceBusKey.getSharedAccessKey();
final String remoteUri = String.format(AMQP_URI_FORMAT, host, idleTimeout);
final JmsConnectionFactory jmsConnectionFactory = new JmsConnectionFactory();
jmsConnectionFactory.setRemoteURI(remoteUri);
jmsConnectionFactory.setClientID(clientId);
jmsConnectionFactory.setUsername(sasKeyName);
jmsConnectionFactory.setPassword(sasKey);
return new CachingConnectionFactory(jmsConnectionFactory);
}
@Bean
public Destination destination() {
return new JmsTopic(destination);
}
@Bean
public JmsTemplate jmsTemplate(ConnectionFactory jmsConnectionFactory, Destination destination) {
final JmsTemplate jmsTemplate = new JmsTemplate();
jmsTemplate.setConnectionFactory(jmsConnectionFactory);
jmsTemplate.setMessageIdEnabled(true);
jmsTemplate.setDefaultDestination(destination);
return jmsTemplate;
}
@Bean
public JmsListenerContainerFactory<?> topicJmsListenerContainerFactory(ConnectionFactory connectionFactory) {
final DefaultJmsListenerContainerFactory jmsListenerContainerFactory = new DefaultJmsListenerContainerFactory();
jmsListenerContainerFactory.setConnectionFactory(connectionFactory);
jmsListenerContainerFactory.setSubscriptionDurable(Boolean.TRUE);
jmsListenerContainerFactory.setSessionAcknowledgeMode(Session.CLIENT_ACKNOWLEDGE);
return jmsListenerContainerFactory;
}
I am using the Azure Service Bus Spring Boot Starter to connect to a Service Bus topic/subscription that is session-enabled, but it is unable to connect and fails with the message below:
It is not possible for an entity that requires sessions to create a non-sessionful message receiver.
In Java, session support works with the azure-servicebus library; for example, in QueuesGettingStarted.java you change queueClient.registerMessageHandler to queueClient.registerSessionHandler and make the related changes (a sketch follows the links below).
But in this case, please check:
https://github.com/Azure/azure-service-bus/issues/326#issuecomment-573236250
https://github.com/MicrosoftDocs/azure-dev-docs/issues/285#issuecomment-699573311
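For illustration, a session-aware handler registered through the native azure-servicebus SDK could look roughly like the sketch below. The class and method names follow the track-1 samples from memory (SubscriptionClient, ISessionHandler, registerSessionHandler, SessionHandlerOptions), so verify the exact signatures against the SDK version you use:
import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import com.microsoft.azure.servicebus.ExceptionPhase;
import com.microsoft.azure.servicebus.IMessage;
import com.microsoft.azure.servicebus.IMessageSession;
import com.microsoft.azure.servicebus.ISessionHandler;
import com.microsoft.azure.servicebus.SessionHandlerOptions;
import com.microsoft.azure.servicebus.SubscriptionClient;
public class SessionAwareListener {
// Registers a session handler on an already-created, session-enabled SubscriptionClient.
// This is a sketch only; overloads and option constructors may differ between SDK versions.
public static void register(SubscriptionClient subscriptionClient) throws Exception {
subscriptionClient.registerSessionHandler(
new ISessionHandler() {
@Override
public CompletableFuture<Void> onMessageAsync(IMessageSession session, IMessage message) {
System.out.println("Session " + session.getSessionId() + " received: " + new String(message.getBody()));
return CompletableFuture.completedFuture(null);
}
@Override
public CompletableFuture<Void> OnCloseSessionAsync(IMessageSession session) {
return CompletableFuture.completedFuture(null);
}
@Override
public void notifyException(Throwable exception, ExceptionPhase phase) {
exception.printStackTrace();
}
},
new SessionHandlerOptions(1, true, Duration.ofMinutes(1)),
Executors.newSingleThreadExecutor());
}
}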
I am having an issue when the server switches from A to B, i.e. when A goes down and B comes up. I am using FailoverClientConnectionFactory, and I see the warning below while sending any request to the server.
WARN o.s.i.i.t.c.TcpMessageMapper - Null payload from connection cisco-x.x.x.x:42017:58773:0e07144c-456e-4e31-a93c-66926b8f7018
I am using FailoverClientConnectionFactory with two servers. It works when the first server is up and running, but not when it switches over to server B.
Here is the code snippet:
@Bean
public FailoverClientConnectionFactory getFailoverClientConnectionFactory() throws CiscoConnectionFailureException
{
FailoverClientConnectionFactory failoverCF = new FailoverClientConnectionFactory(underlyingCF());
failoverCF.setSingleUse(false);
failoverCF.afterPropertiesSet();
return failoverCF;
}
@Bean
public List<AbstractClientConnectionFactory> underlyingCF() throws CiscoConnectionFailureException
{
List<ServerInfo> processServers = mConfigurationService.getProcesservers();
List<AbstractClientConnectionFactory> factories = new ArrayList<>();
boolean connectionSuccessful = false;
for (ServerInfo serverInfo : processServers)
{
AbstractClientConnectionFactory clientFactory = new TcpNetClientConnectionFactory(
serverInfo.getServerAddress(), serverInfo.getServerPort());
clientFactory.setSingleUse(false);
clientFactory.setDeserializer(mMessageSerializerDeserializer);
clientFactory.setSerializer(mMessageSerializerDeserializer);
factories.add(clientFactory);
}
return factories;
}
@Bean(name = "outAdapter.client")
@ServiceActivator(inputChannel = "input", requiresReply = "true")
public TcpSendingMessageHandler getOutboundChannelAdapter(
FailoverClientConnectionFactory failoverClientConnectionFactory)
{
TcpSendingMessageHandler outboundChannelAdapter = new TcpSendingMessageHandler();
outboundChannelAdapter.setOrder(OUTBOUND_ADAPTOR_ORDER);
outboundChannelAdapter.setConnectionFactory(failoverClientConnectionFactory);
outboundChannelAdapter.setClientMode(true);
return outboundChannelAdapter;
}
Does anyone have an idea what the issue is?
I have a Spring Integration configuration that utilizes a priority channel. When an item is read from that channel, local resources are checked at that point in time, and if the resources are not available to process the item, I would like to requeue the message so that another machine picks it up. Originally, I wrongly threw an exception thinking that a requeue would occur, but as was answered in my other question this is not going to work since the priority channel executes in another thread than the listener container.
I thought about placing a filter right after the inbound channel adapter and throwing an exception if resources are not available at that time, but at that instant an accurate assessment cannot be made, because resource availability at that point does not match what will be available when the message is eventually selected based upon priority.
My next thought is to place a filter after the priority channel and before the service activator and direct messages that cannot be handled by current resources to the discard-channel which is defined as an outbound channel adapter that sends the message back to the original queue. Are there pitfalls to this approach?
EDIT 20150917:
Per Gary's advice, I have moved to RabbitMQ 3.5.x in order to take advantage of the built-in priority queues. I now have a problem tracking the number of attempts, as it appears my original message is placed back on the queue rather than my modified message. I have updated the code blocks to reflect the current setup.
EDIT 20150922:
I am updating this post to reflect the final proof of concept code base that I created. I am not a Spring-Integration expert by any means, so please keep that in mind as well as the fact that this test code is not production ready. My original intent was to have messages resubmitted and retried a certain amount of times if a particular exception was thrown. This can be accomplished using the StatefulRetryOperationsInterceptor. But to experiment further, I wanted to be able to set/increment a header on failure and then have something in my flow that could react to that value. That was accomplished by using an extension of the RepublishMessageRecoverer that overrides additionalHeaders(). This object then is used to configure the RetryOperationsInterceptor.
One other minor thing: I wanted to reduce some of the default Spring Integration logging when my signal exception was thrown, so I needed to make sure I named my error channel "errorChannel" in order to replace the Spring Integration default. I also needed to create a custom ErrorHandler to assign to the ListenerContainer in place of its default, which logs everything at ERROR level.
Here is my current setup:
Spring Integration 4.2.0.RELEASE
Spring AMQP 1.5.0.RELEASE
RabbitMQ 3.5.x
Configuration
@Autowired
public void setSpringIntegrationConfigHelper (SpringIntegrationHelper springIntegrationConfigHelper) {
this.springIntegrationConfigHelper = springIntegrationConfigHelper;
}
@Bean
public String priorityPOCQueueName() {
return "poc.priority";
}
@Bean
public Queue priorityPOCQueue(RabbitAdmin rabbitAdmin) {
boolean durable = true;
boolean exclusive = false;
boolean autoDelete = false;
//Adding the x-max-priority argument is what signals RabbitMQ that this is a priority queue. Must be Rabbit 3.5.x
Map<String,Object> arguments = new HashMap<String, Object>();
arguments.put("x-max-priority", 5);
Queue queue = new Queue(priorityPOCQueueName(),
durable,
exclusive,
autoDelete,
arguments);
rabbitAdmin.declareQueue(queue);
return queue;
}
@Bean
public Binding priorityPOCQueueBinding(RabbitAdmin rabbitAdmin) {
Binding binding = new Binding(priorityPOCQueueName(),
DestinationType.QUEUE,
"amq.direct",
priorityPOCQueue(rabbitAdmin).getName(),
null);
rabbitAdmin.declareBinding(binding);
return binding;
}
@Bean
public AmqpTemplate priorityPOCMessageTemplate(ConnectionFactory amqpConnectionFactory,
@Qualifier("priorityPOCQueueName") String queueName,
@Qualifier("jsonMessageConverter") MessageConverter messageConverter) {
RabbitTemplate template = new RabbitTemplate(amqpConnectionFactory);
template.setChannelTransacted(false);
template.setExchange("amq.direct");
template.setQueue(queueName);
template.setRoutingKey(queueName);
template.setMessageConverter(messageConverter);
return template;
}
@Autowired
@Qualifier("priorityPOCQueue")
public void setPriorityPOCQueue(Queue priorityPOCQueue) {
this.priorityPOCQueue = priorityPOCQueue;
}
@Bean
public MessageRecoverer miTestMessageRecoverer(final AmqpTemplate priorityPOCMessageTemplate) {
return new MessageRecoverer() {
@Override
public void recover(org.springframework.amqp.core.Message msg, Throwable t) {
StringBuilder sb = new StringBuilder();
sb.append("Firing Test Recoverer: ").append(t.getClass().getName()).append(" Message Count: ")
.append(msg.getMessageProperties().getMessageCount())
.append(" ID: ").append(msg.getMessageProperties().getMessageId())
.append(" DeliveryTag: ").append(msg.getMessageProperties().getDeliveryTag())
.append(" Redilivered: ").append(msg.getMessageProperties().isRedelivered());
logger.debug(sb.toString());
PriorityMessage m = new PriorityMessage(5);
m.setId(randomGenerator.nextLong(10L, 1000000L));
priorityPOCMessageTemplate.convertAndSend(m , new SimulateErrorHeaderPostProcessor(Boolean.FALSE, m.getPriority()));
}
};
}
@Bean
public RepublishMessageRecoverer miRepublishRecoverer(final AmqpTemplate priorityPOCMessageTemplate) {
class MiRecoverer extends RepublishMessageRecoverer {
public MiRecoverer(AmqpTemplate errorTemplate) {
super(errorTemplate);
this.setErrorRoutingKeyPrefix("");
}
@Override
protected Map<? extends String, ? extends Object> additionalHeaders(
org.springframework.amqp.core.Message message, Throwable cause) {
Map<String, Object> map = new HashMap<>();
if (message.getMessageProperties().getHeaders().containsKey("jmattempts") == false) {
map.put("jmattempts", 0);
} else {
Integer count = Integer.valueOf(message.getMessageProperties().getHeaders().get("jmattempts").toString());
map.put("jmattempts", ++count);
}
return map;
}
} ;
return new MiRecoverer(priorityPOCMessageTemplate);
}
@Bean
public StatefulRetryOperationsInterceptor inadequateResourceInterceptor(@Qualifier("priorityPOCMessageTemplate") AmqpTemplate priorityPOCMessageTemplate
, @Qualifier("priorityMessageKeyGenerator") PriorityMessageKeyGenerator priorityMessageKeyGenerator
, @Qualifier("miTestMessageRecoverer") MessageRecoverer messageRecoverer
, @Qualifier("miRepublishRecoverer") RepublishMessageRecoverer miRepublishRecoverer) {
StatefulRetryInterceptorBuilder b = RetryInterceptorBuilder.stateful();
return b.maxAttempts(2)
.backOffOptions(2000L, 1.0D, 4000L)
.messageKeyGenerator(priorityMessageKeyGenerator)
.recoverer(miRepublishRecoverer)
.build();
}
@Bean(name="exec.priorityPOC")
TaskExecutor taskExecutor() {
ThreadPoolTaskExecutor e = new ThreadPoolTaskExecutor();
e.setCorePoolSize(1);
e.setQueueCapacity(1);
return e;
}
/* @Bean(name="poc.priorityChannel")
public MessageChannel pocPriorityChannel() {
PriorityChannel c = new PriorityChannel(new PriorityComparator());
c.setComponentName("poc.priorityChannel");
c.setBeanName("poc.priorityChannel");
return c;
}
*/
@Bean(name="poc.inputChannel")
public MessageChannel pocPriorityChannel() {
DirectChannel c = new DirectChannel();
c.setComponentName("poc.inputChannel");
c.setBeanName("poc.inputChannel");
return c;
}
@Bean(name="poc.inboundChannelAdapter") //make this a unique name
public AmqpInboundChannelAdapter amqpInboundChannelAdapter(@Qualifier("exec.priorityPOC") TaskExecutor taskExecutor
, @Qualifier("errorChannel") MessageChannel pocErrorChannel
, @Qualifier("inadequateResourceInterceptor") StatefulRetryOperationsInterceptor inadequateResourceInterceptor) {
org.aopalliance.aop.Advice[] adviceChain = new org.aopalliance.aop.Advice[]{inadequateResourceInterceptor};
int concurrentConsumers = 1;
AmqpInboundChannelAdapter a = springIntegrationConfigHelper.createInboundChannelAdapter(taskExecutor
, pocPriorityChannel(), new Queue[]{priorityPOCQueue}, concurrentConsumers, adviceChain
, new PocErrorHandler());
a.setErrorChannel(pocErrorChannel);
return a;
}
@Transformer(inputChannel = "poc.inputChannel", outputChannel = "poc.procesPoc")
public Message<PriorityMessage> incrementAttempts(Message<PriorityMessage> msg) {
//I stopped using this in the POC.
return msg;
}
@ServiceActivator(inputChannel="poc.procesPoc")
public void procesPoc(@Header(SimulateErrorHeaderPostProcessor.ERROR_SIMULATE_HEADER_KEY) Boolean simulateError
, @Headers Map<String, Object> headerMap
, PriorityMessage priorityMessage) throws InterruptedException {
if (isFirstMessageReceived == false) {
//Thread.sleep(15000); //Cause a bit of a backup so we can see prioritizing in action.
isFirstMessageReceived = true;
}
Integer retryAttempts = 0;
if (headerMap.containsKey("jmattempts")) {
retryAttempts = Integer.valueOf(headerMap.get("jmattempts").toString());
}
logger.debug("Received message with priority: " + priorityMessage.getPriority() + ", simulateError: " + simulateError + ", Current attempts count is "
+ retryAttempts);
if (simulateError && retryAttempts < PriorityMessage.MAX_MESSAGE_RETRY_COUNT) {
logger.debug(" Simulating an error and re-queue'ng. Current attempt count is " + retryAttempts);
throw new AnalyzerNonAdequateResourceException();
} else if (simulateError && retryAttempts > PriorityMessage.MAX_MESSAGE_RETRY_COUNT) {
logger.debug(" Max attempt count exceeded");
}
}
/**************************************************************************************************
*
* Error Channel
*
**************************************************************************************************/
//Note that we want to override default Spring error channel, so the name of the bean must be errorChannel
@Bean(name="errorChannel")
public MessageChannel pocErrorChannel() {
DirectChannel c = new DirectChannel();
c.setComponentName("errorChannel");
c.setBeanName("errorChannel");
return c;
}
@ServiceActivator(inputChannel="errorChannel")
public void pocHandleError(Message<MessagingException> message) throws Throwable {
MessagingException me = message.getPayload();
logger.error("pocHandleError: error encountered: " + me.getCause().getClass().getName());
SortedMap<String, Object> sorted= new TreeMap<>();
sorted.putAll(me.getFailedMessage().getHeaders());
if (me.getCause() instanceof AnalyzerNonAdequateResourceException) {
logger.debug("Headers: " + sorted.toString());
//Let this message get requeued
throw me.getCause();
}
Message<?> failedMsg = me.getFailedMessage();
Object o = failedMsg.getPayload();
StringBuilder sb = new StringBuilder();
if (o != null) {
sb.append("AnalyzerErrorHandler: Failed Message Type: ")
.append(o.getClass().getCanonicalName()).append(". toString: ").append(o.toString());
logger.error(sb.toString());
}
//The first level sometimes brings back either MessagingHandlingException or
//MessagingTransformationException which may contain a subcause
Exception e = (Exception)me.getCause();
int i = 0;
sb.delete(0, sb.length());
sb.append("AnalyzerErrorHandler nested messages: ");
while (e != null && i++ < 10) {
sb.append(System.lineSeparator()).append("    ")
.append(e.getClass().getCanonicalName()).append(": ")
.append(e.getMessage());
//walk down the cause chain so each nested exception is reported once
e = (e.getCause() instanceof Exception) ? (Exception) e.getCause() : null;
}
if (i > 0) {
logger.error(sb.toString());
}
//Don't want a message to recycle
throw new AmqpRejectAndDontRequeueException(e);
}
/**
* This gets set on the ListenerContainer. The default handler on the listener
* container logs everything with full stack trace. We don't want to do that
* for our known resource exception
*/
public static class PocErrorHandler implements ErrorHandler {
@Override
public void handleError(Throwable t) {
Throwable cause = t.getCause();
if (cause != null) {
while (cause.getCause() != null) {
cause = cause.getCause();
}
} else {
cause = t;
}
if (cause instanceof AnalyzerNonAdequateResourceException) {
logger.info(AnalyzerNonAdequateResourceException.class.getName() + ": not enough resources to process the item.");
return;
}
else {
logger.error("POC Listener Exception", t);
}
}
}
SpringIntegrationHelper
protected ConnectionFactory connectionFactory;
protected MessageConverter messageConverter;
@Autowired
public void setConnectionFactory (ConnectionFactory connectionFactory) {
this.connectionFactory = connectionFactory;
}
@Autowired
public void setMessageConverter(@Qualifier("jsonMessageConverter") MessageConverter messageConverter) {
this.messageConverter = messageConverter;
}
public AmqpInboundChannelAdapter createInboundChannelAdapter(TaskExecutor taskExecutor
, MessageChannel outputChannel, Queue[] queues, int concurrentConsumers
, org.aopalliance.aop.Advice[] adviceChain,
ErrorHandler errorHandler) {
SimpleMessageListenerContainer listenerContainer =
new SimpleMessageListenerContainer(connectionFactory);
//AUTO is default, but setting it anyhow.
listenerContainer.setAcknowledgeMode(AcknowledgeMode.AUTO);
listenerContainer.setAutoStartup(true);
listenerContainer.setConcurrentConsumers(concurrentConsumers);
listenerContainer.setMessageConverter(messageConverter);
listenerContainer.setQueues(queues);
//listenerContainer.setChannelTransacted(false);
listenerContainer.setErrorHandler(errorHandler);
listenerContainer.setPrefetchCount(1);
listenerContainer.setTaskExecutor(taskExecutor);
listenerContainer.setDefaultRequeueRejected(true);
if (adviceChain != null && adviceChain.length > 0) {
listenerContainer.setAdviceChain(adviceChain);
}
AmqpInboundChannelAdapter a = new AmqpInboundChannelAdapter(listenerContainer);
a.setMessageConverter(messageConverter);
a.setAutoStartup(true);
a.setHeaderMapper(MyAmqpHeaderMapper.createPassAllHeaders());
a.setOutputChannel(outputChannel);
return a;
}
It's not clear why you want to use a PriorityChannel in this context; why not use a priority queue in RabbitMQ? That way, you can run your flow on the container thread.
Sending the message to the back of the queue yourself would work, but there is a risk of message loss.
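To illustrate the priority-queue route, a per-message priority can be set when publishing; here is a minimal sketch against the AmqpTemplate configured above (the sender class itself is hypothetical):
import org.springframework.amqp.AmqpException;
import org.springframework.amqp.core.AmqpTemplate;
import org.springframework.amqp.core.Message;
import org.springframework.amqp.core.MessagePostProcessor;
// Sketch: publish with a per-message priority so the broker orders delivery
// on the x-max-priority queue, and the flow can stay on the container thread.
public class PrioritySender {
private final AmqpTemplate priorityPOCMessageTemplate;
public PrioritySender(AmqpTemplate priorityPOCMessageTemplate) {
this.priorityPOCMessageTemplate = priorityPOCMessageTemplate;
}
public void send(Object payload, final int priority) {
priorityPOCMessageTemplate.convertAndSend(payload, new MessagePostProcessor() {
@Override
public Message postProcessMessage(Message message) throws AmqpException {
// priority must be within 0..x-max-priority (5 in the queue declared above)
message.getMessageProperties().setPriority(priority);
return message;
}
});
}
}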
For the life of me, I have been unable to get RPC with RabbitMQ working with temporary reply-to queues. Below is a simple example derived from this test. I see a bunch of exceptions in my output window and the DLQ fills up, but the message is never acknowledged.
namespace ConsoleApplication4
{
class Program
{
public static IMessageService CreateMqServer(int retryCount = 1)
{
return new RabbitMqServer { RetryCount = retryCount };
}
static void Main(string[] args)
{
using (var mqServer = CreateMqServer())
{
mqServer.RegisterHandler<HelloIntro>(m =>
new HelloIntroResponse { Result = "Hello, {0}!".Fmt(m.GetBody().Name) });
mqServer.Start();
}
Console.WriteLine("ConsoleApplication4");
Console.ReadKey();
}
}
}
namespace ConsoleApplication5
{
class Program
{
public static IMessageService CreateMqServer(int retryCount = 1)
{
return new RabbitMqServer { RetryCount = retryCount };
}
static void Main(string[] args)
{
using (var mqServer = CreateMqServer())
{
using (var mqClient = mqServer.CreateMessageQueueClient())
{
var replyToMq = mqClient.GetTempQueueName();
mqClient.Publish(new Message<HelloIntro>(new HelloIntro { Name = "World" })
{
ReplyTo = replyToMq
});
IMessage<HelloIntroResponse> responseMsg = mqClient.Get<HelloIntroResponse>(replyToMq);
mqClient.Ack(responseMsg);
}
}
Console.WriteLine("ConsoleApplication5");
Console.ReadKey();
}
}
}
First exception
RabbitMQ.Client.Exceptions.OperationInterruptedException occurred
_HResult=-2146233088
_message=The AMQP operation was interrupted: AMQP close-reason, initiated by Peer, code=405, text="RESOURCE_LOCKED - cannot obtain exclusive access to locked queue 'mq:tmp:10dd20804ee546d6bf5a3512f66143ec' in vhost '/'", classId=50, methodId=20, cause=
HResult=-2146233088
IsTransient=false
Message=The AMQP operation was interrupted: AMQP close-reason, initiated by Peer, code=405, text="RESOURCE_LOCKED - cannot obtain exclusive access to locked queue 'mq:tmp:10dd20804ee546d6bf5a3512f66143ec' in vhost '/'", classId=50, methodId=20, cause=
Source=RabbitMQ.Client
StackTrace:
at RabbitMQ.Client.Impl.SimpleBlockingRpcContinuation.GetReply()
at RabbitMQ.Client.Impl.ModelBase.ModelRpc(MethodBase method, ContentHeaderBase header, Byte[] body)
at RabbitMQ.Client.Framing.Impl.v0_9_1.Model._Private_QueueBind(String queue, String exchange, String routingKey, Boolean nowait, IDictionary`2 arguments)
at RabbitMQ.Client.Impl.ModelBase.QueueBind(String queue, String exchange, String routingKey, IDictionary`2 arguments)
at RabbitMQ.Client.Impl.ModelBase.QueueBind(String queue, String exchange, String routingKey)
at ServiceStack.RabbitMq.RabbitMqExtensions.RegisterQueue(IModel channel, String queueName)
at ServiceStack.RabbitMq.RabbitMqExtensions.RegisterQueueByName(IModel channel, String queueName)
at ServiceStack.RabbitMq.RabbitMqProducer.PublishMessage(String exchange, String routingKey, IBasicProperties basicProperties, Byte[] body)
InnerException:
followed by this one
System.Threading.ThreadInterruptedException occurred
_HResult=-2146233063
_message=Thread was interrupted from a waiting state.
HResult=-2146233063
IsTransient=true
Message=Thread was interrupted from a waiting state.
Source=mscorlib
StackTrace:
at System.Threading.Monitor.ObjWait(Boolean exitContext, Int32 millisecondsTimeout, Object obj)
at System.Threading.Monitor.Wait(Object obj, Int32 millisecondsTimeout, Boolean exitContext)
InnerException:
Then it repeats a number of times and hangs. This particular post seems to suggest that they were able to achieve some sort of success with ServiceStack and RabbitMQ RPC, but before I start changing my code I'd like to know why my code doesn't work.
Thank you,
Stephen
When your client calls GetTempQueueName(), it creates an exclusive queue, which cannot be accessed from another connection (i.e. your server).
Therefore I created my own simple MQ client, which does not use ServiceStack's MQ client and only depends on RabbitMQ's .NET library:
public class MqClient : IDisposable
{
ConnectionFactory factory = new ConnectionFactory()
{
HostName = "192.168.97.201",
UserName = "guest",
Password = "guest",
//VirtualHost = "test",
Port = AmqpTcpEndpoint.UseDefaultPort,
};
private IConnection connection;
private string exchangeName;
public MqClient(string defaultExchange)
{
this.exchangeName = defaultExchange;
this.connection = factory.CreateConnection();
}
public TResponse RpcCall<TResponse>(IReturn<TResponse> reqDto, string exchange = null)
{
using (var channel = connection.CreateModel())
{
string inq_queue_name = string.Format("mq:{0}.inq", reqDto.GetType().Name);
string responseQueueName = channel.QueueDeclare("",false,false,true,null).QueueName;
//string responseQueueName = channel.QueueDeclare().QueueName;
var props = channel.CreateBasicProperties();
props.ReplyTo = responseQueueName;
var message = ServiceStack.Text.JsonSerializer.SerializeToString(reqDto);
channel.BasicPublish(exchange ?? this.exchangeName, inq_queue_name, props, UTF8Encoding.UTF8.GetBytes(message));
var consumer = new QueueingBasicConsumer(channel);
channel.BasicConsume(responseQueueName, true, consumer);
var ea = (BasicDeliverEventArgs)consumer.Queue.Dequeue();
//channel.BasicAck(ea.DeliveryTag, false);
string response = UTF8Encoding.UTF8.GetString(ea.Body);
string responseType = ea.BasicProperties.Type;
Console.WriteLine(" [x] New Message of Type '{1}' Received:{2}{0}", response, responseType, Environment.NewLine);
return ServiceStack.Text.JsonSerializer.DeserializeFromString<TResponse>(response);
}
}
~MqClient()
{
this.Dispose();
}
public void Dispose()
{
if (connection != null)
{
this.connection.Dispose();
this.connection = null;
}
}
}
It can be used like this:
using (var mqClient = new MqClient("mx.servicestack"))
{
var pingResponse = mqClient.RpcCall<PingResponse>(new Ping { });
}
Important: You've got to use ServiceStack version 4.0.32+.
There was an issue with re-declaring an exclusive queue, which is no longer done as of this commit.
There's also a new RabbitMqTest project showcasing a simple working Client/Server example communicating via 2 independent Console Applications.
This change is available from v4.0.34+ that's now on MyGet.
The ServiceStack.RabbitMq package RabbitMq.Client NuGet dependency has also been upgraded to v3.4.0.