Spring Integration Load Balance to JMS Queues

I would like to take JMS messages from a single input queue and fan them out onto N output queues.
I have a simple flow that forwards messages to a single destination, but I cannot figure out how to apply a LoadBalancer to allow for multiple destinations in round-robin fashion.
Any ideas how to do this?
@Configuration
public class TestLoadBalance {

    public static final String INPUT_QUEUE = "_dev.lb.input";
    public static final String OUTPUT_QUEUE_PREFIX = "_dev.lb.output-";

    @Bean
    public IntegrationFlow testLoadBalanceFlow(ConnectionFactory jmsConnectionFactory) {
        return IntegrationFlows.from(
                Jms.messageDrivenChannelAdapter(jmsConnectionFactory)
                        .destination(INPUT_QUEUE))
                .handle(buildOutput(jmsConnectionFactory, 1))
                // can't have a 2nd handle; it gets a warning and the flow ends:
                // The 'currentComponent' (org.springframework.integration.jms.JmsSendingMessageHandler#516462cc)
                // is a one-way 'MessageHandler' and it isn't appropriate to configure 'outputChannel'
                //.handle(buildOutput(jmsConnectionFactory, 2))
                .get();
    }

    private JmsSendingMessageHandler buildOutput(ConnectionFactory jmsConnectionFactory, int i) {
        return Jms.outboundAdapter(jmsConnectionFactory)
                .destination(OUTPUT_QUEUE_PREFIX + i)
                .get();
    }
}

There are a couple of ways to do it; you can either have multiple subscribers on the channel...
@Bean
public IntegrationFlow inbound(ConnectionFactory cf) {
    return IntegrationFlows.from(Jms.messageDrivenChannelAdapter(cf)
                    .destination("foo"))
            .channel(roundRobin())
            .get();
}

@Bean
public DirectChannel roundRobin() {
    return new DirectChannel();
}

@Bean
public IntegrationFlow outbound1(ConnectionFactory cf) {
    return IntegrationFlows.from(roundRobin())
            .bridge() // otherwise log() will wire tap the roundRobin channel
            .log()
            .log(new LiteralExpression("Sending to bar"))
            .handle(Jms.outboundAdapter(cf)
                    .destination("bar"))
            .get();
}

@Bean
public IntegrationFlow outbound2(ConnectionFactory cf) {
    return IntegrationFlows.from(roundRobin())
            .bridge() // otherwise log() will wire tap the roundRobin channel
            .log()
            .log(new LiteralExpression("Sending to baz"))
            .handle(Jms.outboundAdapter(cf)
                    .destination("baz"))
            .get();
}
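This works because a DirectChannel with multiple subscribers dispatches to them with a round-robin load-balancing strategy by default, so outbound1 and outbound2 take turns receiving messages.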
Or, you can use a destination expression:
@Bean
public AtomicInteger toggle() {
    return new AtomicInteger();
}

@Bean
public IntegrationFlow inbound(ConnectionFactory cf) {
    return IntegrationFlows.from(Jms.messageDrivenChannelAdapter(cf)
                    .destination("foo"))
            .handle(Jms.outboundAdapter(cf)
                    .destinationExpression("@toggle.getAndIncrement() % 2 == 0 ? 'bar' : 'baz'"))
            .get();
}

@JmsListener(destination = "bar")
public void bar(String in) {
    System.out.println("received " + in + " from bar");
}

@JmsListener(destination = "baz")
public void baz(String in) {
    System.out.println("received " + in + " from baz");
}
Result:
received test1 from bar
received test2 from baz
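In the destination expression, @toggle is a SpEL bean reference to the AtomicInteger bean above; it is evaluated for every message sent, which is why test1 and test2 land on alternating queues.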

Based on Gary's examples, I went with the destinationExpression approach:
@Configuration
public class TestLoadBalance {

    public static final String INPUT_QUEUE = "_dev.lb.input";
    public static final String OUTPUT_QUEUE_PREFIX = "_dev.lb.output-";

    @Bean
    public JmsDestinationPartitioner partitioner() {
        return new JmsDestinationPartitioner(OUTPUT_QUEUE_PREFIX, 1, 3);
    }

    @Bean
    public IntegrationFlow testLoadBalanceFlow(ConnectionFactory jmsConnectionFactory) {
        return IntegrationFlows.from(
                Jms.messageDrivenChannelAdapter(jmsConnectionFactory)
                        .destination(INPUT_QUEUE))
                .handle(Jms.outboundAdapter(jmsConnectionFactory)
                        .destinationExpression("@partitioner.nextDestination()"))
                .get();
    }
}
With a wrapper around an AtomicInteger to handle naming with a prefix:
public class JmsDestinationPartitioner {

    private final String prefix;
    private final int min;
    private final int max;
    private final AtomicInteger current;

    public JmsDestinationPartitioner(String prefix, int min, int max) {
        this.prefix = prefix;
        this.min = min;
        this.max = max;
        this.current = new AtomicInteger(min);
    }

    public int getAndIncrement() {
        // atomically advance, wrapping back to min after max; separate
        // get/increment/set calls could race under concurrent sends
        return current.getAndUpdate(v -> v >= max ? min : v + 1);
    }

    public String nextDestination() {
        return prefix + getAndIncrement();
    }
}
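For illustration (not part of the original post), this is how the partitioner cycles through destination names, given the prefix and bounds used above:

JmsDestinationPartitioner partitioner = new JmsDestinationPartitioner("_dev.lb.output-", 1, 3);
partitioner.nextDestination(); // "_dev.lb.output-1"
partitioner.nextDestination(); // "_dev.lb.output-2"
partitioner.nextDestination(); // "_dev.lb.output-3"
partitioner.nextDestination(); // "_dev.lb.output-1" (wraps back to min)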

Related

Spring Integration + Spring Batch: the job doesn't stop

I want to read a file from an FTP server, save it into a local repository, delete it from the server, then run a job that reads the file, finds one record in the DB, changes one parameter, and saves it.
What is going wrong: the job doesn't finish; it increments the salary and saves the employee many times.
Spring Integration configuration:
@Bean
public FtpInboundFileSynchronizer ftpInboundFileSynchronizer(DefaultFtpSessionFactory sessionFactory) {
    FtpInboundFileSynchronizer fileSynchronizer = new FtpInboundFileSynchronizer(sessionFactory);
    fileSynchronizer.setRemoteDirectory(remoteDirectory);
    fileSynchronizer.setDeleteRemoteFiles(true);
    return fileSynchronizer;
}

@Bean
@InboundChannelAdapter(value = "fileInputChannel", poller = @Poller(cron = "*/5 * * * * ?"))
public FtpInboundFileSynchronizingMessageSource ftpInboundFileSynchronizingMessageSource(FtpInboundFileSynchronizer fileSynchronizer) throws Exception {
    FtpInboundFileSynchronizingMessageSource messageSource = new FtpInboundFileSynchronizingMessageSource(fileSynchronizer);
    messageSource.setAutoCreateLocalDirectory(true);
    messageSource.setLocalDirectory(new File(localDirectory));
    messageSource.setLocalFilter(new AcceptOnceFileListFilter<>());
    return messageSource;
}

@Bean
@ServiceActivator(inputChannel = "fileInputChannel")
public FileWritingMessageHandler fileWritingMessageHandler() {
    FileWritingMessageHandler messageHandler = new FileWritingMessageHandler(new File(localDirectory));
    messageHandler.setOutputChannelName("jobLaunchRequestChannel");
    return messageHandler;
}

@ServiceActivator(inputChannel = "jobLaunchRequestChannel", outputChannel = "jobLaunchingGatewayChannel")
public JobLaunchRequest jobLaunchRequest(File file) throws IOException {
    String[] content = FileUtils.readFileToString(file, "UTF-8").split("\\s+");
    JobParameters jobParameters = new JobParametersBuilder()
            .addString("filename", file.getAbsolutePath())
            .addString("id", content[0]).addString("salary", content[1])
            // .addLong("time", System.currentTimeMillis())
            .toJobParameters();
    return new JobLaunchRequest(increaseSalaryJob, jobParameters);
}

@Bean
@ServiceActivator(inputChannel = "jobLaunchingGatewayChannel")
public JobLaunchingGateway jobLaunchingGateway(SimpleJobLauncher jobLauncher) {
    JobLaunchingGateway jobLaunchingGateway = new JobLaunchingGateway(jobLauncher);
    jobLaunchingGateway.setOutputChannelName("finish");
    return jobLaunchingGateway;
}

@ServiceActivator(inputChannel = "finish")
public void finish() {
    System.out.println("FINISH");
}
Spring Batch configuration:
@Bean
public Job increaseSalaryJob(CustomJobListener listener, Step step1) {
    return jobBuilderFactory.get("increaseSalaryJob")
            .preventRestart()
            .listener(listener)
            .start(step1)
            .build();
}

@Bean
public Step step1(ItemReader<Employee> reader) {
    return stepBuilderFactory.get("step1")
            .transactionManager(transactionManager)
            .<Employee, Employee> chunk(1)
            .reader(reader)
            .processor(processor())
            .writer(writer())
            .build();
}

@Bean
@StepScope
public ItemReader<Employee> reader(@Value("#{jobParameters[id]}") Integer id) {
    log.info("reader");
    return () -> employeeService.get(id);
}

@Bean
@StepScope
public ItemProcessor<Employee, Employee> processor() {
    log.info("processor");
    return employee -> {
        log.info(employee.getName() + " had salary " + employee.getSalary());
        Integer salary = employee.getSalary() + 1;
        employee.setSalary(salary);
        log.info(employee.getName() + " have salary " + employee.getSalary());
        return employee;
    };
}

@Bean
@StepScope
public ItemWriter<Employee> writer() {
    log.info("writer");
    return employees -> {
        for (Employee employee : employees) {
            try {
                employeeService.update(employee);
                log.info(employee.getName() + " updated with salary " + employee.getSalary());
            } catch (ValidationException e) {
                e.printStackTrace();
            }
        }
    };
}

@Bean
public MapJobRepositoryFactoryBean jobRepositoryFactoryBean(PlatformTransactionManager transactionManager) {
    return new MapJobRepositoryFactoryBean(transactionManager);
}

@Bean
public JobRepository jobRepository(MapJobRepositoryFactoryBean jobRepositoryFactoryBean) throws Exception {
    jobRepositoryFactoryBean.setTransactionManager(transactionManager);
    return jobRepositoryFactoryBean.getObject();
}

@Bean
public SimpleJobLauncher jobLauncher(JobRepository jobRepository) {
    SimpleJobLauncher jobLauncher = new SimpleJobLauncher();
    jobLauncher.setJobRepository(jobRepository);
    return jobLauncher;
}
I will be glad of any help.
You need to make sure that your reader returns null at some point. This is how the step interprets that there is no more data to process and exits (which in turn will stop the surrounding job if there are no more steps to run).
That said, I see the input of your chunk-oriented step is a single id. For this use case, a simple tasklet is enough, no need for a chunk-oriented tasklet with a single input record and chunkSize=1.
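As a minimal sketch of that tasklet alternative (my illustration, not from the original answer, assuming the question's stepBuilderFactory and employeeService):

@Bean
public Step step1() {
    return stepBuilderFactory.get("step1")
            .tasklet((contribution, chunkContext) -> {
                // the job parameters were added as strings, so convert the id back
                String id = (String) chunkContext.getStepContext().getJobParameters().get("id");
                Employee employee = employeeService.get(Integer.valueOf(id));
                employee.setSalary(employee.getSalary() + 1);
                employeeService.update(employee);
                return RepeatStatus.FINISHED; // run exactly once; the step and job then complete
            })
            .build();
}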

Spring Integration transaction strategy spanning file inbound adapter and queue channel

I have a directory being read by an inbound file adapter, piped into a priority channel that sorts the files by name. I've created a transaction synchronization factory for moving the files after processing is done, which works fine for the inbound adapter and all the transformations/aggregations happening in an additional file-writer flow. But as soon as I add the PriorityChannel, the transaction seems to finish there and is not passed on to the transformation/aggregation logic.
Here is the inbound flow
return IntegrationFlows
        .from(fileReadingMessageSource,
                c -> c.poller(Pollers.fixedDelay(period)
                        .taskExecutor(taskExecutor)
                        .maxMessagesPerPoll(maxMessagesPerPoll)
                        .transactionSynchronizationFactory(transactionSynchronizationFactory())
                        .transactional(transactionManager())))
        .channel("alphabetically")
        .bridge(s -> s.poller(Pollers.fixedDelay(100)))
        .channel(ApplicationConfiguration.INBOUND_CHANNEL)
        .get();
And the transaction synchronization strategy
@Bean
TransactionSynchronizationFactory transactionSynchronizationFactory() {
    ExpressionParser parser = new SpelExpressionParser();
    ExpressionEvaluatingTransactionSynchronizationProcessor syncProcessor = new ExpressionEvaluatingTransactionSynchronizationProcessor();
    syncProcessor.setBeanFactory(applicationContext.getAutowireCapableBeanFactory());
    syncProcessor.setAfterCommitExpression(parser.parseExpression(
            "payload.renameTo(new java.io.File(@inboundProcessedDirectory.path " + " + T(java.io.File).separator + payload.name))"));
    syncProcessor.setAfterRollbackExpression(parser.parseExpression(
            "payload.renameTo(new java.io.File(@inboundFailedDirectory.path " + " + T(java.io.File).separator + payload.name))"));
    return new DefaultTransactionSynchronizationFactory(syncProcessor);
}
Any idea how to make this transaction span the priority queue channel as well? Or is there another way I could implement reading files in alphabetical order?
EDIT1
According to Gary, this should work (providing the whole example, as asked):
@Configuration
class FilePollingIntegrationFlow {

    @Autowired
    public File inboundReadDirectory;

    @Autowired
    private ApplicationContext applicationContext;

    @Bean
    public IntegrationFlow inboundFileIntegration(@Value("${inbound.file.poller.fixed.delay}") long period,
            @Value("${inbound.file.poller.max.messages.per.poll}") int maxMessagesPerPoll, TaskExecutor taskExecutor,
            MessageSource<File> fileReadingMessageSource) {
        return IntegrationFlows
                .from(fileReadingMessageSource,
                        c -> c.poller(Pollers.fixedDelay(period)
                                .taskExecutor(taskExecutor)
                                .maxMessagesPerPoll(maxMessagesPerPoll)
                                .transactionSynchronizationFactory(transactionSynchronizationFactory())
                                .transactional(transactionManager())))
                .channel(ApplicationConfiguration.INBOUND_CHANNEL)
                .get();
    }

    @Bean
    TaskExecutor taskExecutor(@Value("${inbound.file.poller.thread.pool.size}") int poolSize) {
        ThreadPoolTaskExecutor taskExecutor = new ThreadPoolTaskExecutor();
        taskExecutor.setCorePoolSize(poolSize);
        return taskExecutor;
    }

    @Bean
    PseudoTransactionManager transactionManager() {
        return new PseudoTransactionManager();
    }

    @Bean
    TransactionSynchronizationFactory transactionSynchronizationFactory() {
        ExpressionParser parser = new SpelExpressionParser();
        ExpressionEvaluatingTransactionSynchronizationProcessor syncProcessor = new ExpressionEvaluatingTransactionSynchronizationProcessor();
        syncProcessor.setBeanFactory(applicationContext.getAutowireCapableBeanFactory());
        syncProcessor.setAfterCommitExpression(parser.parseExpression(
                "payload.renameTo(new java.io.File(@inboundProcessedDirectory.path " + " + T(java.io.File).separator + payload.name))"));
        syncProcessor.setAfterRollbackExpression(parser.parseExpression(
                "payload.renameTo(new java.io.File(@inboundFailedDirectory.path " + " + T(java.io.File).separator + payload.name))"));
        return new DefaultTransactionSynchronizationFactory(syncProcessor);
    }

    @Bean
    public FileReadingMessageSource fileReadingMessageSource(DirectoryScanner directoryScanner) {
        FileReadingMessageSource source = new FileReadingMessageSource();
        source.setDirectory(this.inboundReadDirectory);
        source.setScanner(directoryScanner);
        source.setAutoCreateDirectory(true);
        return source;
    }

    @Bean
    public DirectoryScanner directoryScanner(@Value("${inbound.filename.regex}") String regex) {
        DirectoryScanner scanner = new RecursiveDirectoryScanner();
        CompositeFileListFilter<File> filter = new CompositeFileListFilter<>(
                Arrays.asList(new AcceptOnceFileListFilter<>(), new RegexPatternFileListFilter(regex), new AlphabeticalFileListFilter()));
        scanner.setFilter(filter);
        return scanner;
    }

    private class AlphabeticalFileListFilter implements FileListFilter<File> {

        @Override
        public List<File> filterFiles(File[] files) {
            List<File> list = Arrays.asList(files);
            list.sort(Comparator.comparing(File::getName));
            return list;
        }
    }
}
@Configuration
public class FilePollingConfiguration {

    @Bean(name = "inboundReadDirectory")
    public File inboundReadDirectory(@Value("${inbound.read.path}") String path) {
        return makeDirectory(path);
    }

    @Bean(name = "inboundProcessedDirectory")
    public File inboundProcessedDirectory(@Value("${inbound.processed.path}") String path) {
        return makeDirectory(path);
    }

    @Bean(name = "inboundFailedDirectory")
    public File inboundFailedDirectory(@Value("${inbound.failed.path}") String path) {
        return makeDirectory(path);
    }

    @Bean(name = "inboundOutDirectory")
    public File inboundOutDirectory(@Value("${inbound.out.path}") String path) {
        return makeDirectory(path);
    }

    private File makeDirectory(String path) {
        File file = new File(path);
        file.mkdirs();
        return file;
    }
}
By doing this and removing the PriorityChannel, the transaction still doesn't work as I thought it would. Using this flow, the file is not available in the Http outbound gateway. Any idea why?
@Component
public class MessageProcessingIntegrationFlow {

    public static final String OUTBOUND_FILENAME_GENERATOR = "outboundFilenameGenerator.handler";
    public static final String FILE_WRITING_MESSAGE_HANDLER = "fileWritingMessageHandler";

    @Autowired
    public File inboundOutDirectory;

    @Bean
    public IntegrationFlow writeToFile(@Value("${api.base.uri}") URI uri,
            @Value("${out.filename.dateFormat}") String dateFormat, @Value("${out.filename.suffix}") String filenameSuffix) {
        return IntegrationFlows.from(ApplicationConfiguration.INBOUND_CHANNEL)
                .enrichHeaders(h -> h.headerFunction(IntegrationMessageHeaderAccessor.CORRELATION_ID,
                        m -> ((String) m.getHeaders().get(FileHeaders.FILENAME)).substring(0, 17)))
                .aggregate(a -> a.groupTimeout(2000)
                        .sendPartialResultOnExpiry(true))
                .transform(m -> {
                    MultiValueMap<String, Object> body = new LinkedMultiValueMap<>();
                    //noinspection unchecked
                    ((List<File>) m).forEach(f -> body.add("documents", new FileSystemResource((File) f)));
                    return body;
                })
                .handle(Http.outboundGateway(uri)
                        .httpMethod(HttpMethod.POST)
                        .expectedResponseType(byte[].class))
                .handle(Files.outboundGateway(inboundOutDirectory)
                        .autoCreateDirectory(true)
                        .fileNameGenerator(m -> m.getHeaders().get(FileHeaders.FILENAME) + "_"
                                + DateTimeFormatter.ofPattern(dateFormat).format(LocalDateTime.now()) + filenameSuffix))
                .log(LoggingHandler.Level.INFO)
                .get();
    }
}
You cannot switch threads with Spring transactions; the transaction is bound to the thread.
You can use a custom FileListFilter in the message source instead and sort the files there.
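(The AlphabeticalFileListFilter in the EDIT1 configuration above is exactly such a filter: it sorts each poll's listing by file name before the files enter the flow.)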
Thanks to Gary Russell, I came up with the following solution:
@Bean
public IntegrationFlow inboundFileIntegration(@Value("${inbound.file.poller.fixed.delay}") long period,
        @Value("${inbound.file.poller.max.messages.per.poll}") int maxMessagesPerPoll,
        @Value("${inbound.file.poller.thread.pool.size}") int poolSize,
        MessageSource<File> fileReadingMessageSource) {
    return IntegrationFlows
            .from(fileReadingMessageSource,
                    c -> c.poller(Pollers.fixedDelay(period)
                            .taskExecutor(Executors.newFixedThreadPool(poolSize))
                            .maxMessagesPerPoll(maxMessagesPerPoll)))
            .channel("alphabetically")
            .bridge(s -> s.poller(Pollers.fixedDelay(100)))
            .channel(ApplicationConfiguration.INBOUND_CHANNEL)
            .get();
}
The advices, with the endpoint spec:
@Bean
public Advice fileMoveAdvice() {
    ExpressionEvaluatingRequestHandlerAdvice advice = new ExpressionEvaluatingRequestHandlerAdvice();
    advice.setOnSuccessExpression(new FunctionExpression<Message<?>>(m -> renameMultiValueMapFiles(m, this.inboundProcessedDirectory)));
    advice.setOnFailureExpression(new FunctionExpression<Message<?>>(m -> renameMultiValueMapFiles(m, this.inboundFailedDirectory)));
    return advice;
}

@Bean
public Consumer<GenericEndpointSpec<HttpRequestExecutingMessageHandler>> outboundSpec() {
    return new Consumer<GenericEndpointSpec<HttpRequestExecutingMessageHandler>>() {

        @Override
        public void accept(GenericEndpointSpec<HttpRequestExecutingMessageHandler> spec) {
            spec.advice(fileMoveAdvice(), retryAdvice());
        }
    };
}

@SneakyThrows(IOException.class)
private boolean renameMultiValueMapFiles(Message<?> m, File directory) {
    MultiValueMap<String, Resource> files = (MultiValueMap<String, Resource>) m.getPayload();
    List<File> list = new ArrayList<>();
    // no lambda, to avoid needing a throwing Function type
    for (List<Resource> l : files.values()) {
        for (Resource v : l) {
            list.add(v.getFile());
        }
    }
    list.forEach(v -> v.renameTo(new File(directory.getPath(), v.getName())));
    return true;
}
Added the spec to the handle:
.handle(Http.outboundGateway(uri)
        .httpMethod(HttpMethod.POST)
        .expectedResponseType(byte[].class), this.advices.outboundSpec())
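Unlike the thread-bound transaction synchronization, the ExpressionEvaluatingRequestHandlerAdvice wraps the individual endpoint (the HTTP gateway here), so its success/failure expressions run on whatever thread handles the message, and the files are moved based on the actual outcome of the HTTP call.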

publishSubscribeChannel unit test doesn't work

My integration config class is below. When I run some unit tests on it, I find that:
when I send a message to userRecipientSubscribeCacheChannel, it works well;
when I send a message to the upstream channel userReportWriteCompletedRouteChannel, it fails, and it doesn't throw any exceptions. I can't understand it. The messages I send are the same, of course.
Because of the failing section, the next handler can't work.
Thanks!
The working case is below; it prints ===>ip location channel message:GenericMessage [payload=[MailRecipientActionDocumen... and ===>user recipient channel message:GenericMessage [payload=[UserRecipientSubscribeDataRedisStructure...
@Test
public void test_sendMessageUserRecipientSubscribeCacheChannel() {
    userRecipientSubscribeCacheChannel.send(createMessageWithIp());
}
The failing case is below; it prints only ===>ip location channel message:GenericMessage [payload=[MailRecipientActionDocumen...
Notice that in the failing section there is a transformer in front of the handler.
@Test
public void test_sendMessageToRouteChannel() {
    userReportWriteCompletedRouteChannel.send(createMessageWithIp());
}
My config code is below:
@Bean
public SubscribableChannel userReportWriteCompletedSubscribeChannel() {
    return new DirectChannel();
}

@Bean
public QueueChannel userReportWriteCompletedRouteChannel() {
    return new QueueChannel();
}

@Bean
public MessageChannel ipLocationResolveCacheChannel() {
    return new DirectChannel();
}

@Bean
public MessageChannel userRecipientSubscribeCacheChannel() {
    return new DirectChannel();
}

@MessagingGateway(name = "userReportWriteCompletedListener",
        defaultRequestChannel = "userReportWriteCompletedRouteChannel")
public interface UserReportWriteCompletedListener {

    @Gateway
    void receive(List<UserMailRecipientActionDocument> docs);
}

@Bean
public IntegrationFlow bridgeFlow() {
    return flow -> flow.channel("userReportWriteCompletedRouteChannel")
            .bridge(bridgeSpec -> bridgeSpec
                    .poller(pollerFactory -> pollerFactory.fixedRate(500).maxMessagesPerPoll(1)))
            .channel("userReportWriteCompletedSubscribeChannel");
}

@Bean
public IntegrationFlow subscribeFlow() {
    return IntegrationFlows.from("userReportWriteCompletedSubscribeChannel")
            .publishSubscribeChannel(publishSubscribeSpec -> publishSubscribeSpec
                    .subscribe(flow -> flow
                            .channel(IP_LOCATION_RESOLVE_CACHE_CHANNEL))
                    .subscribe(flow -> flow
                            .channel(USER_RECIPIENT_SUBSCRIBE_CACHE_CHANNEL)))
            .get();
}

@Bean
public RedisStoreWritingMessageHandler ipLocationResolveCacheHandler(RedisTemplate<String, ?> redisTemplate) {
    final RedisStoreWritingMessageHandler ipLocationResolveCacheHandler =
            new RedisStoreWritingMessageHandler(redisTemplate);
    ipLocationResolveCacheHandler.setKey("IP_LOCATION_RESOLVE_CACHE");
    return ipLocationResolveCacheHandler;
}

@Bean
public RedisStoreWritingMessageHandler userRecipientSubscribeCacheHandler(RedisTemplate<String, ?> redisTemplate) {
    final RedisStoreWritingMessageHandler userRecipientSubscribeCacheHandler =
            new RedisStoreWritingMessageHandler(redisTemplate);
    userRecipientSubscribeCacheHandler.setKey("USER_RECIPIENT_SUBSCRIBE_CACHE");
    return userRecipientSubscribeCacheHandler;
}

@Bean
public IpLocationResolveRedisStructureFilterAndTransformer recipientActionHasIpFilterAndTransformer() {
    return new IpLocationResolveRedisStructureFilterAndTransformer();
}

@Bean
public UserRecipientSubscribeDataRedisStructureTransformer subscribeDataRedisStructureTransformer(
        IpLocationClient ipLocationClient) {
    return new UserRecipientSubscribeDataRedisStructureTransformer(ipLocationClient);
}

@Bean
public IntegrationFlow ipLocationResolveCacheFlow(
        @Qualifier("ipLocationResolveCacheHandler") RedisStoreWritingMessageHandler writingMessageHandler) {
    return flow -> flow.channel(IP_LOCATION_RESOLVE_CACHE_CHANNEL)
            .handle(message -> {
                System.out.println("===>ip location channel message:" + message);
            });
}

@Bean
public IntegrationFlow userRecipientActionDataCacheFlow(
        @Qualifier("userRecipientSubscribeCacheHandler") RedisStoreWritingMessageHandler messageHandler,
        UserRecipientSubscribeDataRedisStructureTransformer transformer) {
    return flow -> flow.channel(USER_RECIPIENT_SUBSCRIBE_CACHE_CHANNEL)
            .transform(transformer)
            .handle(message -> {
                System.out.println("===>user recipient channel message:" + message);
            });
}
I expect 2 printed messages, but only 1 is printed.
Today I found that the bridge flow may have a problem. When I move the handler behind the channel userReportWriteCompletedSubscribeChannel, it doesn't print any message;
when I remove the channel and add the handler directly, it prints the message.
Am I using the bridge wrong?
@Bean
public IntegrationFlow bridgeFlow() {
    return flow -> flow.channel("userReportWriteCompletedRouteChannel")
            .bridge(bridgeSpec -> bridgeSpec
                    .poller(pollerFactory -> pollerFactory.fixedRate(100).maxMessagesPerPoll(1)))
            .handle(message -> {
                System.out.println("===>route channel message:" + message);
            }) // handle OK, will print the message
            .channel("userReportWriteCompletedSubscribeChannel")
            // .handle(message -> {
            //     System.out.println("===>route channel message:" + message);
            // }) // handle fails, will not print the message
            ;
}
The test:
@Test
// fails
public void test_sendMessageToRouteChannel() {
    userReportWriteCompletedRouteChannel.send(createMessageWithIp());
}

Spring Integration redelivery via errorChannel throw with JmsTransactionManager doesn't honor maximumRedeliveries

Related to the SO question: Spring Integration Java DSL using JMS retry/redelivery
Using a transacted poller and JmsTransactionManager on a connectionFactory with maximumRedeliveries set to 3 results in a doubling of the actual redelivery attempts.
How can I get this to honor the redelivery settings of the connection factory?
My connectionFactory is built as:
@Bean(name = "spring-int-connection-factory")
ActiveMQConnectionFactory jmsConnectionFactory() {
    return buildConnectionFactory(
            brokerUrl,
            DELAY_2_SECS,
            MAX_REDELIVERIES,
            "spring-int");
}

public static ActiveMQConnectionFactory buildConnectionFactory(String brokerUrl, Long retryDelay, Integer maxRedeliveries, String clientIdPrefix) {
    ActiveMQConnectionFactory amqcf = new ActiveMQConnectionFactory();
    amqcf.setBrokerURL(brokerUrl);
    amqcf.setClientIDPrefix(clientIdPrefix);
    if (maxRedeliveries != null) {
        if (retryDelay == null) {
            retryDelay = 500L;
        }
        RedeliveryPolicy rp = new org.apache.activemq.RedeliveryPolicy();
        rp.setInitialRedeliveryDelay(retryDelay);
        rp.setRedeliveryDelay(retryDelay);
        rp.setMaximumRedeliveries(maxRedeliveries);
        amqcf.setRedeliveryPolicy(rp); // the policy must be set on the factory to take effect
    }
    return amqcf;
}
My flow with the poller is:
@Bean
public IntegrationFlow flow2(@Qualifier("spring-int-connection-factory") ConnectionFactory connectionFactory) {
    return IntegrationFlows.from(
            Jms.inboundAdapter(connectionFactory)
                    .configureJmsTemplate(t -> t.receiveTimeout(1000).sessionTransacted(true))
                    .destination(INPUT_DIRECT_QUEUE),
            e -> e.poller(Pollers
                    .fixedDelay(5000)
                    .transactional()
                    .errorChannel("customErrorChannel")
                    .maxMessagesPerPoll(2)))
            .handle(this.msgHandler)
            .get();
}
My errorChannel handler simply re-throws which causes JMS redelivery to happen.
When I run this with the handler set to always throw an exception, I see that the message handler actually receives the message 7 times (1 initial and 6 redeliveries).
I expected only 3 redeliveries according to my connectionFactory config.
Any ideas what is causing the doubling of attempts and how to mitigate it?
This works fine for me - stops at 4...
@SpringBootApplication
public class So51792909Application {

    private static final Logger logger = LoggerFactory.getLogger(So51792909Application.class);

    public static void main(String[] args) {
        SpringApplication.run(So51792909Application.class, args);
    }

    @Bean
    public ApplicationRunner runner(JmsTemplate template) {
        return args -> {
            for (int i = 0; i < 1; i++) {
                template.convertAndSend("foo", "test");
            }
        };
    }

    @Bean
    public IntegrationFlow flow(ConnectionFactory connectionFactory) {
        return IntegrationFlows.from(Jms.inboundAdapter(connectionFactory)
                        .destination("foo"), e -> e
                        .poller(Pollers
                                .fixedDelay(5000)
                                .transactional()
                                .maxMessagesPerPoll(2)))
                .handle((p, h) -> {
                    System.out.println(h.get("JMSXDeliveryCount"));
                    try {
                        Thread.sleep(2000);
                    }
                    catch (InterruptedException e1) {
                        Thread.currentThread().interrupt();
                    }
                    throw new RuntimeException("foo");
                })
                .get();
    }

    @Bean
    public JmsTransactionManager transactionManager(ConnectionFactory cf) {
        return new JmsTransactionManager(cf);
    }

    @Bean
    public ActiveMQConnectionFactory amqCF() {
        ActiveMQConnectionFactory cf = new ActiveMQConnectionFactory("vm://localhost?broker.persistent=false");
        RedeliveryPolicy rp = new RedeliveryPolicy();
        rp.setMaximumRedeliveries(3);
        cf.setRedeliveryPolicy(rp);
        return cf;
    }

    @Bean
    public CachingConnectionFactory connectionFactory() {
        return new CachingConnectionFactory(amqCF());
    }

    @JmsListener(destination = "ActiveMQ.DLQ")
    public void listen(String in) {
        logger.info(in);
    }
}
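The count stops at 4 because that is 1 initial delivery plus the 3 redeliveries allowed by the RedeliveryPolicy; after that, ActiveMQ moves the message to ActiveMQ.DLQ, where the @JmsListener logs it.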

Spring Cloud App Starter, sftp source, recurse a directory for files

I am using the SFTP Source in Spring Cloud Dataflow, and it works for getting files defined in sftp:remote-dir:/home/someone/source. Now I have many subfolders under the remote-dir, and I want to recursively get all the files under this directory which match the pattern. I am trying to use filename-regex:, but so far it only works at one level. How do I recursively get the files I need?
The inbound channel adapter does not support recursion; use a custom source with the outbound gateway with an MGET command, with recursion (-R).
The doc is missing that option; fixed in the current docs.
I opened an issue to create a standard app starter.
EDIT
With the Java DSL...
@SpringBootApplication
@EnableBinding(Source.class)
public class So44710754Application {

    public static void main(String[] args) {
        SpringApplication.run(So44710754Application.class, args);
    }

    // should store in Redis or similar for persistence
    private final ConcurrentMap<String, Boolean> processed = new ConcurrentHashMap<>();

    @Bean
    public IntegrationFlow flow() {
        return IntegrationFlows.from(source(), e -> e.poller(Pollers.fixedDelay(30_000)))
                .handle(gateway())
                .split()
                .<File>filter(p -> this.processed.putIfAbsent(p.getAbsolutePath(), true) == null)
                .transform(Transformers.fileToByteArray())
                .channel(Source.OUTPUT)
                .get();
    }

    private MessageSource<String> source() {
        return () -> new GenericMessage<>("foo/*");
    }

    private AbstractRemoteFileOutboundGateway<LsEntry> gateway() {
        AbstractRemoteFileOutboundGateway<LsEntry> gateway = Sftp.outboundGateway(sessionFactory(), "mget", "payload")
                .localDirectory(new File("/tmp/foo"))
                .options(Option.RECURSIVE)
                .get();
        gateway.setFileExistsMode(FileExistsMode.IGNORE);
        return gateway;
    }

    private SessionFactory<LsEntry> sessionFactory() {
        DefaultSftpSessionFactory sf = new DefaultSftpSessionFactory();
        sf.setHost("10.0.0.3");
        sf.setUser("ftptest");
        sf.setPassword("ftptest");
        sf.setAllowUnknownKeys(true);
        return new CachingSessionFactory<>(sf);
    }
}
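A note on how this flow works: the mget gateway returns a List<File> of everything fetched under foo/*, split() emits one File per message, the filter passes each absolute path only once (the idempotency guard across polls), and the transformer publishes byte[] payloads to the binder's output channel.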
And with Java config...
@SpringBootApplication
@EnableBinding(Source.class)
public class So44710754Application {

    public static void main(String[] args) {
        SpringApplication.run(So44710754Application.class, args);
    }

    @InboundChannelAdapter(channel = "sftpGate", poller = @Poller(fixedDelay = "30000"))
    public String remoteDir() {
        return "foo/*";
    }

    @Bean
    @ServiceActivator(inputChannel = "sftpGate")
    public SftpOutboundGateway mgetGate() {
        SftpOutboundGateway sftpOutboundGateway = new SftpOutboundGateway(sessionFactory(), "mget", "payload");
        sftpOutboundGateway.setOutputChannelName("splitterChannel");
        sftpOutboundGateway.setFileExistsMode(FileExistsMode.IGNORE);
        sftpOutboundGateway.setLocalDirectory(new File("/tmp/foo"));
        sftpOutboundGateway.setOptions("-R");
        return sftpOutboundGateway;
    }

    @Bean
    @Splitter(inputChannel = "splitterChannel")
    public DefaultMessageSplitter splitter() {
        DefaultMessageSplitter splitter = new DefaultMessageSplitter();
        splitter.setOutputChannelName("filterChannel");
        return splitter;
    }

    // should store in Redis, Zookeeper, or similar for persistence
    private final ConcurrentMap<String, Boolean> processed = new ConcurrentHashMap<>();

    @Filter(inputChannel = "filterChannel", outputChannel = "toBytesChannel")
    public boolean filter(File payload) {
        return this.processed.putIfAbsent(payload.getAbsolutePath(), true) == null;
    }

    @Bean
    @Transformer(inputChannel = "toBytesChannel", outputChannel = Source.OUTPUT)
    public FileToByteArrayTransformer toBytes() {
        return new FileToByteArrayTransformer();
    }

    private SessionFactory<LsEntry> sessionFactory() {
        DefaultSftpSessionFactory sf = new DefaultSftpSessionFactory();
        sf.setHost("10.0.0.3");
        sf.setUser("ftptest");
        sf.setPassword("ftptest");
        sf.setAllowUnknownKeys(true);
        return new CachingSessionFactory<>(sf);
    }
}
