001/**
002 * Licensed to the Apache Software Foundation (ASF) under one or more
003 * contributor license agreements.  See the NOTICE file distributed with
004 * this work for additional information regarding copyright ownership.
005 * The ASF licenses this file to You under the Apache License, Version 2.0
006 * (the "License"); you may not use this file except in compliance with
007 * the License.  You may obtain a copy of the License at
008 *
009 *      http://www.apache.org/licenses/LICENSE-2.0
010 *
011 * Unless required by applicable law or agreed to in writing, software
012 * distributed under the License is distributed on an "AS IS" BASIS,
013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
014 * See the License for the specific language governing permissions and
015 * limitations under the License.
016 */
017package org.apache.camel.processor.aggregate;
018
019import java.util.ArrayList;
020import java.util.Collections;
021import java.util.LinkedHashSet;
022import java.util.List;
023import java.util.Map;
024import java.util.Set;
025import java.util.concurrent.ConcurrentHashMap;
026import java.util.concurrent.ConcurrentSkipListSet;
027import java.util.concurrent.ExecutorService;
028import java.util.concurrent.ScheduledExecutorService;
029import java.util.concurrent.TimeUnit;
030import java.util.concurrent.atomic.AtomicBoolean;
031import java.util.concurrent.atomic.AtomicInteger;
032import java.util.concurrent.atomic.AtomicLong;
033import java.util.concurrent.locks.Lock;
034import java.util.concurrent.locks.ReentrantLock;
035
036import org.apache.camel.AsyncCallback;
037import org.apache.camel.AsyncProcessor;
038import org.apache.camel.CamelContext;
039import org.apache.camel.CamelContextAware;
040import org.apache.camel.CamelExchangeException;
041import org.apache.camel.Endpoint;
042import org.apache.camel.Exchange;
043import org.apache.camel.Expression;
044import org.apache.camel.Navigate;
045import org.apache.camel.NoSuchEndpointException;
046import org.apache.camel.Predicate;
047import org.apache.camel.Processor;
048import org.apache.camel.ProducerTemplate;
049import org.apache.camel.ShutdownRunningTask;
050import org.apache.camel.TimeoutMap;
051import org.apache.camel.Traceable;
052import org.apache.camel.spi.AggregationRepository;
053import org.apache.camel.spi.ExceptionHandler;
054import org.apache.camel.spi.IdAware;
055import org.apache.camel.spi.OptimisticLockingAggregationRepository;
056import org.apache.camel.spi.RecoverableAggregationRepository;
057import org.apache.camel.spi.ShutdownAware;
058import org.apache.camel.spi.ShutdownPrepared;
059import org.apache.camel.spi.Synchronization;
060import org.apache.camel.support.DefaultTimeoutMap;
061import org.apache.camel.support.LoggingExceptionHandler;
062import org.apache.camel.support.ServiceSupport;
063import org.apache.camel.util.AsyncProcessorHelper;
064import org.apache.camel.util.ExchangeHelper;
065import org.apache.camel.util.LRUCacheFactory;
066import org.apache.camel.util.ObjectHelper;
067import org.apache.camel.util.ServiceHelper;
068import org.apache.camel.util.StopWatch;
069import org.apache.camel.util.TimeUtils;
070import org.slf4j.Logger;
071import org.slf4j.LoggerFactory;
072
073/**
074 * An implementation of the <a
075 * href="http://camel.apache.org/aggregator2.html">Aggregator</a>
 * pattern where a batch of messages is processed (up to a maximum amount or
 * until some timeout is reached) and messages for the same correlation key are
 * combined using some kind of {@link AggregationStrategy}
079 * (by default the latest message is used) to compress many message exchanges
080 * into a smaller number of exchanges.
081 * <p/>
082 * A good example of this is stock market data; you may be receiving 30,000
083 * messages/second and you may want to throttle it right down so that multiple
084 * messages for the same stock are combined (or just the latest message is used
085 * and older prices are discarded). Another idea is to combine line item messages
086 * together into a single invoice message.
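 * <p/>
 * As an illustrative sketch only (the endpoint URIs, correlation header and completion
 * settings below are placeholder values, not defaults of this class), a route using the
 * aggregator from the Java DSL could look like:
 * <pre>
 * from("direct:start")
 *     .aggregate(header("orderId"), new UseLatestAggregationStrategy())
 *         .completionSize(100)
 *         .completionTimeout(3000)
 *     .to("mock:aggregated");
 * </pre>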
087 */
088public class AggregateProcessor extends ServiceSupport implements AsyncProcessor, Navigate<Processor>, Traceable, ShutdownPrepared, ShutdownAware, IdAware {
089
090    public static final String AGGREGATE_TIMEOUT_CHECKER = "AggregateTimeoutChecker";
091
092    private static final Logger LOG = LoggerFactory.getLogger(AggregateProcessor.class);
093
094    private final Lock lock = new ReentrantLock();
095    private final AtomicBoolean aggregateRepositoryWarned = new AtomicBoolean();
096    private final CamelContext camelContext;
097    private final Processor processor;
098    private String id;
099    private AggregationStrategy aggregationStrategy;
100    private boolean preCompletion;
101    private Expression correlationExpression;
102    private AggregateController aggregateController;
103    private final ExecutorService executorService;
104    private final boolean shutdownExecutorService;
105    private OptimisticLockRetryPolicy optimisticLockRetryPolicy = new OptimisticLockRetryPolicy();
106    private ScheduledExecutorService timeoutCheckerExecutorService;
107    private boolean shutdownTimeoutCheckerExecutorService;
108    private ScheduledExecutorService recoverService;
109    // store correlation key -> exchange id in timeout map
110    private TimeoutMap<String, String> timeoutMap;
111    private ExceptionHandler exceptionHandler;
112    private AggregationRepository aggregationRepository;
113    private Map<String, String> closedCorrelationKeys;
114    private final Set<String> batchConsumerCorrelationKeys = new ConcurrentSkipListSet<>();
115    private final Set<String> inProgressCompleteExchanges = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
116    private final Map<String, RedeliveryData> redeliveryState = new ConcurrentHashMap<>();
117
118    private final AggregateProcessorStatistics statistics = new Statistics();
119    private final AtomicLong totalIn = new AtomicLong();
120    private final AtomicLong totalCompleted = new AtomicLong();
121    private final AtomicLong completedBySize = new AtomicLong();
122    private final AtomicLong completedByStrategy = new AtomicLong();
123    private final AtomicLong completedByInterval = new AtomicLong();
124    private final AtomicLong completedByTimeout = new AtomicLong();
125    private final AtomicLong completedByPredicate = new AtomicLong();
126    private final AtomicLong completedByBatchConsumer = new AtomicLong();
127    private final AtomicLong completedByForce = new AtomicLong();
128
    // keeps bookkeeping about redelivery attempts
130    private class RedeliveryData {
131        int redeliveryCounter;
132    }
133
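    /**
     * Simple view over the completion counters above; exposed to callers via {@link #getStatistics()}.
     */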
134    private class Statistics implements AggregateProcessorStatistics {
135
136        private boolean statisticsEnabled = true;
137
138        public long getTotalIn() {
139            return totalIn.get();
140        }
141
142        public long getTotalCompleted() {
143            return totalCompleted.get();
144        }
145
146        public long getCompletedBySize() {
147            return completedBySize.get();
148        }
149
150        public long getCompletedByStrategy() {
151            return completedByStrategy.get();
152        }
153
154        public long getCompletedByInterval() {
155            return completedByInterval.get();
156        }
157
158        public long getCompletedByTimeout() {
159            return completedByTimeout.get();
160        }
161
162        public long getCompletedByPredicate() {
163            return completedByPredicate.get();
164        }
165
166        public long getCompletedByBatchConsumer() {
167            return completedByBatchConsumer.get();
168        }
169
170        public long getCompletedByForce() {
171            return completedByForce.get();
172        }
173
        public void reset() {
            totalIn.set(0);
            totalCompleted.set(0);
            completedBySize.set(0);
            completedByStrategy.set(0);
            completedByInterval.set(0);
            completedByTimeout.set(0);
            completedByPredicate.set(0);
            completedByBatchConsumer.set(0);
            completedByForce.set(0);
        }
184
185        public boolean isStatisticsEnabled() {
186            return statisticsEnabled;
187        }
188
189        public void setStatisticsEnabled(boolean statisticsEnabled) {
190            this.statisticsEnabled = statisticsEnabled;
191        }
192    }
193
194    // options
195    private boolean ignoreInvalidCorrelationKeys;
196    private Integer closeCorrelationKeyOnCompletion;
197    private boolean parallelProcessing;
198    private boolean optimisticLocking;
199
200    // different ways to have completion triggered
201    private boolean eagerCheckCompletion;
202    private Predicate completionPredicate;
203    private long completionTimeout;
204    private Expression completionTimeoutExpression;
205    private long completionInterval;
206    private int completionSize;
207    private Expression completionSizeExpression;
208    private boolean completionFromBatchConsumer;
209    private boolean completionOnNewCorrelationGroup;
210    private AtomicInteger batchConsumerCounter = new AtomicInteger();
211    private boolean discardOnCompletionTimeout;
212    private boolean forceCompletionOnStop;
213    private boolean completeAllOnStop;
214    private long completionTimeoutCheckerInterval = 1000;
215
216    private ProducerTemplate deadLetterProducerTemplate;
217
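    /**
     * Creates this aggregate processor.
     * <p/>
     * All arguments except <tt>shutdownExecutorService</tt> must not be <tt>null</tt>.
     *
     * @param camelContext            the Camel context
     * @param processor               the downstream processor to send completed (aggregated) exchanges to
     * @param correlationExpression   the expression used to compute the correlation key for each exchange
     * @param aggregationStrategy     the strategy used to combine the incoming exchange with the existing aggregate
     * @param executorService         the thread pool used for sending out completed exchanges
     * @param shutdownExecutorService whether this processor is responsible for shutting down the given thread pool when it stops
     */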
218    public AggregateProcessor(CamelContext camelContext, Processor processor,
219                              Expression correlationExpression, AggregationStrategy aggregationStrategy,
220                              ExecutorService executorService, boolean shutdownExecutorService) {
221        ObjectHelper.notNull(camelContext, "camelContext");
222        ObjectHelper.notNull(processor, "processor");
223        ObjectHelper.notNull(correlationExpression, "correlationExpression");
224        ObjectHelper.notNull(aggregationStrategy, "aggregationStrategy");
225        ObjectHelper.notNull(executorService, "executorService");
226        this.camelContext = camelContext;
227        this.processor = processor;
228        this.correlationExpression = correlationExpression;
229        this.aggregationStrategy = aggregationStrategy;
230        this.executorService = executorService;
231        this.shutdownExecutorService = shutdownExecutorService;
232        this.exceptionHandler = new LoggingExceptionHandler(camelContext, getClass());
233    }
234
235    @Override
236    public String toString() {
237        return "AggregateProcessor[to: " + processor + "]";
238    }
239
240    public String getTraceLabel() {
241        return "aggregate[" + correlationExpression + "]";
242    }
243
244    public List<Processor> next() {
245        if (!hasNext()) {
246            return null;
247        }
248        List<Processor> answer = new ArrayList<>(1);
249        answer.add(processor);
250        return answer;
251    }
252
253    public boolean hasNext() {
254        return processor != null;
255    }
256
257    public String getId() {
258        return id;
259    }
260
261    public void setId(String id) {
262        this.id = id;
263    }
264
265    public void process(Exchange exchange) throws Exception {
266        AsyncProcessorHelper.process(this, exchange);
267    }
268
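    // Note: the aggregation itself is performed synchronously in the calling thread; only completed
    // groups are handed off to the executor service (see onSubmitCompletion), so this method always
    // invokes the callback synchronously and returns true.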
269    public boolean process(Exchange exchange, AsyncCallback callback) {
270        try {
271            doProcess(exchange);
272        } catch (Throwable e) {
273            exchange.setException(e);
274        }
275        callback.done(true);
276        return true;
277    }
278
279    protected void doProcess(Exchange exchange) throws Exception {
280
281        if (getStatistics().isStatisticsEnabled()) {
282            totalIn.incrementAndGet();
283        }
284
        // check for the special header to force completion of all groups (the exchange itself is otherwise ignored, not aggregated)
286        boolean completeAllGroups = exchange.getIn().getHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS, false, boolean.class);
287        if (completeAllGroups) {
288            // remove the header so we do not complete again
289            exchange.getIn().removeHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS);
290            forceCompletionOfAllGroups();
291            return;
292        }
293
294        // compute correlation expression
295        String key = correlationExpression.evaluate(exchange, String.class);
296        if (ObjectHelper.isEmpty(key)) {
297            // we have a bad correlation key
298            if (isIgnoreInvalidCorrelationKeys()) {
299                LOG.debug("Invalid correlation key. This Exchange will be ignored: {}", exchange);
300                return;
301            } else {
302                throw new CamelExchangeException("Invalid correlation key", exchange);
303            }
304        }
305
306        // is the correlation key closed?
307        if (closedCorrelationKeys != null && closedCorrelationKeys.containsKey(key)) {
308            throw new ClosedCorrelationKeyException(key, exchange);
309        }
310
        // when optimistic locking is enabled we keep trying until we succeed
312        if (optimisticLocking) {
313            List<Exchange> aggregated = null;
314            boolean exhaustedRetries = true;
315            int attempt = 0;
316            do {
317                attempt++;
318                // copy exchange, and do not share the unit of work
319                // the aggregated output runs in another unit of work
320                Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);
321
                // remove the complete all groups headers as they should not be on the copy
323                copy.getIn().removeHeader(Exchange.AGGREGATION_COMPLETE_CURRENT_GROUP);
324                copy.getIn().removeHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS);
325                copy.getIn().removeHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS_INCLUSIVE);
326
327                try {
328                    aggregated = doAggregation(key, copy);
329                    exhaustedRetries = false;
330                    break;
331                } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
332                    LOG.trace("On attempt {} OptimisticLockingAggregationRepository: {} threw OptimisticLockingException while trying to add() key: {} and exchange: {}",
333                              new Object[]{attempt, aggregationRepository, key, copy, e});
334                    optimisticLockRetryPolicy.doDelay(attempt);
335                }
336            } while (optimisticLockRetryPolicy.shouldRetry(attempt));
337
338            if (exhaustedRetries) {
339                throw new CamelExchangeException("Exhausted optimistic locking retry attempts, tried " + attempt + " times", exchange,
340                        new OptimisticLockingAggregationRepository.OptimisticLockingException());
341            } else if (aggregated != null) {
342                // we are completed so submit to completion
343                for (Exchange agg : aggregated) {
344                    onSubmitCompletion(key, agg);
345                }
346            }
347        } else {
348            // copy exchange, and do not share the unit of work
349            // the aggregated output runs in another unit of work
350            Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);
351
            // remove the complete all groups headers as they should not be on the copy
353            copy.getIn().removeHeader(Exchange.AGGREGATION_COMPLETE_CURRENT_GROUP);
354            copy.getIn().removeHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS);
355            copy.getIn().removeHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS_INCLUSIVE);
356
            // when memory based then it's fast using the lock, but if the aggregation repository is IO
            // bound such as JPA etc. then concurrent aggregation per correlation key could
            // improve performance as we can run aggregation repository get/add in parallel
360            List<Exchange> aggregated;
361            lock.lock();
362            try {
363                aggregated = doAggregation(key, copy);
364            } finally {
365                lock.unlock();
366            }
367            // we are completed so do that work outside the lock
368            if (aggregated != null) {
369                for (Exchange agg : aggregated) {
370                    onSubmitCompletion(key, agg);
371                }
372            }
373        }
374
375        // check for the special header to force completion of all groups (inclusive of the message)
376        boolean completeAllGroupsInclusive = exchange.getIn().getHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS_INCLUSIVE, false, boolean.class);
377        if (completeAllGroupsInclusive) {
378            // remove the header so we do not complete again
379            exchange.getIn().removeHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS_INCLUSIVE);
380            forceCompletionOfAllGroups();
381        }
382    }
383
384    /**
385     * Aggregates the exchange with the given correlation key
386     * <p/>
387     * This method <b>must</b> be run synchronized as we cannot aggregate the same correlation key
388     * in parallel.
389     * <p/>
     * The returned {@link Exchange} should be sent downstream using the {@link #onSubmitCompletion(String, org.apache.camel.Exchange)}
391     * method which sends out the aggregated and completed {@link Exchange}.
392     *
393     * @param key      the correlation key
394     * @param newExchange the exchange
     * @return the aggregated exchange(s) which are complete, or an empty list if not yet complete
     * @throws org.apache.camel.CamelExchangeException is thrown if an error occurs during aggregation
397     */
398    private List<Exchange> doAggregation(String key, Exchange newExchange) throws CamelExchangeException {
399        LOG.trace("onAggregation +++ start +++ with correlation key: {}", key);
400
401        List<Exchange> list = new ArrayList<>();
402        String complete = null;
403
404        Exchange answer;
405        Exchange originalExchange = aggregationRepository.get(newExchange.getContext(), key);
406        Exchange oldExchange = originalExchange;
407
408        Integer size = 1;
409        if (oldExchange != null) {
            // hack to support legacy AggregationStrategy implementations that modify and return the oldExchange,
            // these will not work when using an identity-based approach for optimistic locking like the MemoryAggregationRepository.
412            if (optimisticLocking && aggregationRepository instanceof MemoryAggregationRepository) {
413                oldExchange = originalExchange.copy();
414            }
415            size = oldExchange.getProperty(Exchange.AGGREGATED_SIZE, 0, Integer.class);
416            size++;
417        }
418
419        // prepare the exchanges for aggregation
420        ExchangeHelper.prepareAggregation(oldExchange, newExchange);
421
422        // check if we are pre complete
423        if (preCompletion) {
424            try {
                // put the current aggregated size on the exchange so it's available during the completion check
426                newExchange.setProperty(Exchange.AGGREGATED_SIZE, size);
427                complete = isPreCompleted(key, oldExchange, newExchange);
428                // make sure to track timeouts if not complete
429                if (complete == null) {
430                    trackTimeout(key, newExchange);
431                }
432                // remove it afterwards
433                newExchange.removeProperty(Exchange.AGGREGATED_SIZE);
434            } catch (Throwable e) {
435                // must catch any exception from aggregation
436                throw new CamelExchangeException("Error occurred during preComplete", newExchange, e);
437            }
438        } else if (isEagerCheckCompletion()) {
            // put the current aggregated size on the exchange so it's available during the completion check
440            newExchange.setProperty(Exchange.AGGREGATED_SIZE, size);
441            complete = isCompleted(key, newExchange);
442            // make sure to track timeouts if not complete
443            if (complete == null) {
444                trackTimeout(key, newExchange);
445            }
446            // remove it afterwards
447            newExchange.removeProperty(Exchange.AGGREGATED_SIZE);
448        }
449
450        if (preCompletion && complete != null) {
451            // need to pre complete the current group before we aggregate
452            doAggregationComplete(complete, list, key, originalExchange, oldExchange);
            // as we complete the current group eagerly, we should indicate the new group is not complete
454            complete = null;
455            // and clear old/original exchange as we start on a new group
456            oldExchange = null;
457            originalExchange = null;
458            // and reset the size to 1
459            size = 1;
            // make sure to track timeout as we just restarted the correlation group since we are in pre-completion mode
461            trackTimeout(key, newExchange);
462        }
463
464        // aggregate the exchanges
465        try {
466            answer = onAggregation(oldExchange, newExchange);
467        } catch (Throwable e) {
468            // must catch any exception from aggregation
469            throw new CamelExchangeException("Error occurred during aggregation", newExchange, e);
470        }
471        if (answer == null) {
472            throw new CamelExchangeException("AggregationStrategy " + aggregationStrategy + " returned null which is not allowed", newExchange);
473        }
474
475        // check for the special exchange property to force completion of all groups
476        boolean completeAllGroups = answer.getProperty(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS, false, boolean.class);
477        if (completeAllGroups) {
478            // remove the exchange property so we do not complete again
479            answer.removeProperty(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS);
480            forceCompletionOfAllGroups();
481        } else if (isCompletionOnNewCorrelationGroup() && originalExchange == null) {
            // it's a new group so force completion of all existing groups
483            forceCompletionOfAllGroups();
484        }
485
486        // special for some repository implementations
487        if (aggregationRepository instanceof RecoverableAggregationRepository) {
488            boolean valid = oldExchange == null || answer.getExchangeId().equals(oldExchange.getExchangeId());
489            if (!valid && aggregateRepositoryWarned.compareAndSet(false, true)) {
490                LOG.warn("AggregationStrategy should return the oldExchange instance instead of the newExchange whenever possible"
491                    + " as otherwise this can lead to unexpected behavior with some RecoverableAggregationRepository implementations");
492            }
493        }
494
495        // update the aggregated size
496        answer.setProperty(Exchange.AGGREGATED_SIZE, size);
497
        // otherwise check completion after the aggregation (when neither pre-completion nor eager completion checking is used)
499        if (!preCompletion && !isEagerCheckCompletion()) {
500            complete = isCompleted(key, answer);
501            // make sure to track timeouts if not complete
502            if (complete == null) {
503                trackTimeout(key, newExchange);
504            }
505        }
506
507        if (complete == null) {
508            // only need to update aggregation repository if we are not complete
509            doAggregationRepositoryAdd(newExchange.getContext(), key, originalExchange, answer);
510        } else {
511            // if we are complete then add the answer to the list
512            doAggregationComplete(complete, list, key, originalExchange, answer);
513        }
514
515        LOG.trace("onAggregation +++  end  +++ with correlation key: {}", key);
516        return list;
517    }
518
519    protected void doAggregationComplete(String complete, List<Exchange> list, String key, Exchange originalExchange, Exchange answer) {
520        if ("consumer".equals(complete)) {
521            for (String batchKey : batchConsumerCorrelationKeys) {
522                Exchange batchAnswer;
523                if (batchKey.equals(key)) {
524                    // skip the current aggregated key as we have already aggregated it and have the answer
525                    batchAnswer = answer;
526                } else {
527                    batchAnswer = aggregationRepository.get(camelContext, batchKey);
528                }
529
530                if (batchAnswer != null) {
531                    batchAnswer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, complete);
532                    onCompletion(batchKey, originalExchange, batchAnswer, false);
533                    list.add(batchAnswer);
534                }
535            }
536            batchConsumerCorrelationKeys.clear();
537            // we have already submitted to completion, so answer should be null
538            answer = null;
539        } else if (answer != null) {
540            // we are complete for this exchange
541            answer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, complete);
542            answer = onCompletion(key, originalExchange, answer, false);
543        }
544
545        if (answer != null) {
546            list.add(answer);
547        }
548    }
549
550    protected void doAggregationRepositoryAdd(CamelContext camelContext, String key, Exchange oldExchange, Exchange newExchange) {
551        LOG.trace("In progress aggregated oldExchange: {}, newExchange: {} with correlation key: {}", new Object[]{oldExchange, newExchange, key});
552        if (optimisticLocking) {
553            try {
554                ((OptimisticLockingAggregationRepository)aggregationRepository).add(camelContext, key, oldExchange, newExchange);
555            } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
556                onOptimisticLockingFailure(oldExchange, newExchange);
557                throw e;
558            }
559        } else {
560            aggregationRepository.add(camelContext, key, newExchange);
561        }
562    }
563
564    protected void onOptimisticLockingFailure(Exchange oldExchange, Exchange newExchange) {
565        AggregationStrategy strategy = aggregationStrategy;
566        if (strategy instanceof DelegateAggregationStrategy) {
567            strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
568        }
569        if (strategy instanceof OptimisticLockingAwareAggregationStrategy) {
570            LOG.trace("onOptimisticLockFailure with AggregationStrategy: {}, oldExchange: {}, newExchange: {}",
571                      new Object[]{strategy, oldExchange, newExchange});
572            ((OptimisticLockingAwareAggregationStrategy)strategy).onOptimisticLockFailure(oldExchange, newExchange);
573        }
574    }
575
576    /**
     * Tests whether the given exchange is pre-complete or not
578     *
579     * @param key      the correlation key
580     * @param oldExchange   the existing exchange
581     * @param newExchange the incoming exchange
582     * @return <tt>null</tt> if not pre-completed, otherwise a String with the type that triggered the pre-completion
583     */
584    protected String isPreCompleted(String key, Exchange oldExchange, Exchange newExchange) {
585        boolean complete = false;
586        AggregationStrategy strategy = aggregationStrategy;
587        if (strategy instanceof DelegateAggregationStrategy) {
588            strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
589        }
590        if (strategy instanceof PreCompletionAwareAggregationStrategy) {
591            complete = ((PreCompletionAwareAggregationStrategy) strategy).preComplete(oldExchange, newExchange);
592        }
593        return complete ? "strategy" : null;
594    }
595
596    /**
597     * Tests whether the given exchange is complete or not
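     * <p/>
     * Checks are evaluated in order: batch consumer completion, the
     * {@link Exchange#AGGREGATION_COMPLETE_CURRENT_GROUP} exchange property, the completion
     * predicate, the completion size expression, and finally the configured completion size.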
598     *
599     * @param key      the correlation key
600     * @param exchange the incoming exchange
601     * @return <tt>null</tt> if not completed, otherwise a String with the type that triggered the completion
602     */
603    protected String isCompleted(String key, Exchange exchange) {
604        // batch consumer completion must always run first
605        if (isCompletionFromBatchConsumer()) {
606            batchConsumerCorrelationKeys.add(key);
607            batchConsumerCounter.incrementAndGet();
608            int size = exchange.getProperty(Exchange.BATCH_SIZE, 0, Integer.class);
609            if (size > 0 && batchConsumerCounter.intValue() >= size) {
                // the batch consumer is complete, so reset the counter
611                batchConsumerCounter.set(0);
612                return "consumer";
613            }
614        }
615
616        if (exchange.getProperty(Exchange.AGGREGATION_COMPLETE_CURRENT_GROUP, false, boolean.class)) {
617            return "strategy";
618        }
619
620        if (getCompletionPredicate() != null) {
621            boolean answer = getCompletionPredicate().matches(exchange);
622            if (answer) {
623                return "predicate";
624            }
625        }
626
627        boolean sizeChecked = false;
628        if (getCompletionSizeExpression() != null) {
629            Integer value = getCompletionSizeExpression().evaluate(exchange, Integer.class);
630            if (value != null && value > 0) {
                // mark size as already checked as the expression takes precedence over the statically configured size
632                sizeChecked = true;
633                int size = exchange.getProperty(Exchange.AGGREGATED_SIZE, 1, Integer.class);
634                if (size >= value) {
635                    return "size";
636                }
637            }
638        }
639        if (!sizeChecked && getCompletionSize() > 0) {
640            int size = exchange.getProperty(Exchange.AGGREGATED_SIZE, 1, Integer.class);
641            if (size >= getCompletionSize()) {
642                return "size";
643            }
644        }
645
646        // not complete
647        return null;
648    }
649
650    protected void trackTimeout(String key, Exchange exchange) {
651        // timeout can be either evaluated based on an expression or from a fixed value
652        // expression takes precedence
653        boolean timeoutSet = false;
654        if (getCompletionTimeoutExpression() != null) {
655            Long value = getCompletionTimeoutExpression().evaluate(exchange, Long.class);
656            if (value != null && value > 0) {
657                if (LOG.isTraceEnabled()) {
658                    LOG.trace("Updating correlation key {} to timeout after {} ms. as exchange received: {}",
659                            new Object[]{key, value, exchange});
660                }
661                addExchangeToTimeoutMap(key, exchange, value);
662                timeoutSet = true;
663            }
664        }
665        if (!timeoutSet && getCompletionTimeout() > 0) {
666            // timeout is used so use the timeout map to keep an eye on this
667            if (LOG.isTraceEnabled()) {
668                LOG.trace("Updating correlation key {} to timeout after {} ms. as exchange received: {}",
669                        new Object[]{key, getCompletionTimeout(), exchange});
670            }
671            addExchangeToTimeoutMap(key, exchange, getCompletionTimeout());
672        }
673    }
674
675    protected Exchange onAggregation(Exchange oldExchange, Exchange newExchange) {
676        return aggregationStrategy.aggregate(oldExchange, newExchange);
677    }
678
679    protected boolean onPreCompletionAggregation(Exchange oldExchange, Exchange newExchange) {
680        AggregationStrategy strategy = aggregationStrategy;
681        if (strategy instanceof DelegateAggregationStrategy) {
682            strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
683        }
684        if (strategy instanceof PreCompletionAwareAggregationStrategy) {
685            return ((PreCompletionAwareAggregationStrategy) strategy).preComplete(oldExchange, newExchange);
686        }
687        return false;
688    }
689
690    protected Exchange onCompletion(final String key, final Exchange original, final Exchange aggregated, boolean fromTimeout) {
691        // store the correlation key as property before we remove so the repository has that information
692        if (original != null) {
693            original.setProperty(Exchange.AGGREGATED_CORRELATION_KEY, key);
694        }
695        aggregated.setProperty(Exchange.AGGREGATED_CORRELATION_KEY, key);
696
        // only remove if we have previously added (as we could potentially complete with only 1 exchange)
        // (if we have previously added then we have that as the original exchange)
699        if (original != null) {
            // remove from repository as it's completed, we do this first so as to trigger any OptimisticLockingExceptions
701            aggregationRepository.remove(aggregated.getContext(), key, original);
702        }
703
704        if (!fromTimeout && timeoutMap != null) {
            // clean up the timeout map if it was an incoming exchange which triggered the completion (and not the timeout checker)
706            LOG.trace("Removing correlation key {} from timeout", key);
707            timeoutMap.remove(key);
708        }
709
710        // this key has been closed so add it to the closed map
711        if (closedCorrelationKeys != null) {
712            closedCorrelationKeys.put(key, key);
713        }
714
715        if (fromTimeout) {
            // invoke timeout if it's a timeout-aware aggregation strategy,
717            // to allow any custom processing before discarding the exchange
718            AggregationStrategy strategy = aggregationStrategy;
719            if (strategy instanceof DelegateAggregationStrategy) {
720                strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
721            }
722            if (strategy instanceof TimeoutAwareAggregationStrategy) {
723                long timeout = getCompletionTimeout() > 0 ? getCompletionTimeout() : -1;
724                ((TimeoutAwareAggregationStrategy) strategy).timeout(aggregated, -1, -1, timeout);
725            }
726        }
727
728        Exchange answer;
729        if (fromTimeout && isDiscardOnCompletionTimeout()) {
            // discard due to timeout
731            LOG.debug("Aggregation for correlation key {} discarding aggregated exchange: {}", key, aggregated);
732            // must confirm the discarded exchange
733            aggregationRepository.confirm(aggregated.getContext(), aggregated.getExchangeId());
734            // and remove redelivery state as well
735            redeliveryState.remove(aggregated.getExchangeId());
736            // the completion was from timeout and we should just discard it
737            answer = null;
738        } else {
739            // the aggregated exchange should be published (sent out)
740            answer = aggregated;
741        }
742
743        return answer;
744    }
745
746    private void onSubmitCompletion(final String key, final Exchange exchange) {
747        LOG.debug("Aggregation complete for correlation key {} sending aggregated exchange: {}", key, exchange);
748
749        // add this as in progress before we submit the task
750        inProgressCompleteExchanges.add(exchange.getExchangeId());
751
752        // invoke the on completion callback
753        AggregationStrategy target = aggregationStrategy;
754        if (target instanceof DelegateAggregationStrategy) {
755            target = ((DelegateAggregationStrategy) target).getDelegate();
756        }
757        if (target instanceof CompletionAwareAggregationStrategy) {
758            ((CompletionAwareAggregationStrategy) target).onCompletion(exchange);
759        }
760
761        if (getStatistics().isStatisticsEnabled()) {
762            totalCompleted.incrementAndGet();
763
764            String completedBy = exchange.getProperty(Exchange.AGGREGATED_COMPLETED_BY, String.class);
765            if ("interval".equals(completedBy)) {
766                completedByInterval.incrementAndGet();
767            } else if ("timeout".equals(completedBy)) {
768                completedByTimeout.incrementAndGet();
769            } else if ("force".equals(completedBy)) {
770                completedByForce.incrementAndGet();
771            } else if ("consumer".equals(completedBy)) {
772                completedByBatchConsumer.incrementAndGet();
773            } else if ("predicate".equals(completedBy)) {
774                completedByPredicate.incrementAndGet();
775            } else if ("size".equals(completedBy)) {
776                completedBySize.incrementAndGet();
777            } else if ("strategy".equals(completedBy)) {
778                completedByStrategy.incrementAndGet();
779            }
780        }
781
782        // send this exchange
783        executorService.submit(new Runnable() {
784            public void run() {
785                LOG.debug("Processing aggregated exchange: {}", exchange);
786
787                // add on completion task so we remember to update the inProgressCompleteExchanges
788                exchange.addOnCompletion(new AggregateOnCompletion(exchange.getExchangeId()));
789
790                try {
791                    processor.process(exchange);
792                } catch (Throwable e) {
793                    exchange.setException(e);
794                }
795
796                // log exception if there was a problem
797                if (exchange.getException() != null) {
798                    // if there was an exception then let the exception handler handle it
799                    getExceptionHandler().handleException("Error processing aggregated exchange", exchange, exchange.getException());
800                } else {
801                    LOG.trace("Processing aggregated exchange: {} complete.", exchange);
802                }
803            }
804        });
805    }
806
807    /**
808     * Restores the timeout map with timeout values from the aggregation repository.
809     * <p/>
810     * This is needed in case the aggregator has been stopped and started again (for example a server restart).
811     * Then the existing exchanges from the {@link AggregationRepository} must have their timeout conditions restored.
812     */
813    protected void restoreTimeoutMapFromAggregationRepository() throws Exception {
814        // grab the timeout value for each partly aggregated exchange
815        Set<String> keys = aggregationRepository.getKeys();
816        if (keys == null || keys.isEmpty()) {
817            return;
818        }
819
820        StopWatch watch = new StopWatch();
821        LOG.trace("Starting restoring CompletionTimeout for {} existing exchanges from the aggregation repository...", keys.size());
822
823        for (String key : keys) {
824            Exchange exchange = aggregationRepository.get(camelContext, key);
825            // grab the timeout value
826            long timeout = exchange.hasProperties() ? exchange.getProperty(Exchange.AGGREGATED_TIMEOUT, 0, long.class) : 0;
827            if (timeout > 0) {
828                LOG.trace("Restoring CompletionTimeout for exchangeId: {} with timeout: {} millis.", exchange.getExchangeId(), timeout);
829                addExchangeToTimeoutMap(key, exchange, timeout);
830            }
831        }
832
833        // log duration of this task so end user can see how long it takes to pre-check this upon starting
834        LOG.info("Restored {} CompletionTimeout conditions in the AggregationTimeoutChecker in {}",
835                timeoutMap.size(), TimeUtils.printDuration(watch.taken()));
836    }
837
838    /**
839     * Adds the given exchange to the timeout map, which is used by the timeout checker task to trigger timeouts.
840     *
841     * @param key      the correlation key
842     * @param exchange the exchange
843     * @param timeout  the timeout value in millis
844     */
845    private void addExchangeToTimeoutMap(String key, Exchange exchange, long timeout) {
846        // store the timeout value on the exchange as well, in case we need it later
847        exchange.setProperty(Exchange.AGGREGATED_TIMEOUT, timeout);
848        timeoutMap.put(key, exchange.getExchangeId(), timeout);
849    }
850
851    /**
852     * Current number of closed correlation keys in the memory cache
853     */
854    public int getClosedCorrelationKeysCacheSize() {
855        if (closedCorrelationKeys != null) {
856            return closedCorrelationKeys.size();
857        } else {
858            return 0;
859        }
860    }
861
862    /**
863     * Clear all the closed correlation keys stored in the cache
864     */
865    public void clearClosedCorrelationKeysCache() {
866        if (closedCorrelationKeys != null) {
867            closedCorrelationKeys.clear();
868        }
869    }
870
871    public AggregateProcessorStatistics getStatistics() {
872        return statistics;
873    }
874
875    public int getInProgressCompleteExchanges() {
876        return inProgressCompleteExchanges.size();
877    }
878
879    public Predicate getCompletionPredicate() {
880        return completionPredicate;
881    }
882
883    public void setCompletionPredicate(Predicate completionPredicate) {
884        this.completionPredicate = completionPredicate;
885    }
886
887    public boolean isEagerCheckCompletion() {
888        return eagerCheckCompletion;
889    }
890
891    public void setEagerCheckCompletion(boolean eagerCheckCompletion) {
892        this.eagerCheckCompletion = eagerCheckCompletion;
893    }
894
895    public long getCompletionTimeout() {
896        return completionTimeout;
897    }
898
899    public void setCompletionTimeout(long completionTimeout) {
900        this.completionTimeout = completionTimeout;
901    }
902
903    public Expression getCompletionTimeoutExpression() {
904        return completionTimeoutExpression;
905    }
906
907    public void setCompletionTimeoutExpression(Expression completionTimeoutExpression) {
908        this.completionTimeoutExpression = completionTimeoutExpression;
909    }
910
911    public long getCompletionInterval() {
912        return completionInterval;
913    }
914
915    public void setCompletionInterval(long completionInterval) {
916        this.completionInterval = completionInterval;
917    }
918
919    public int getCompletionSize() {
920        return completionSize;
921    }
922
923    public void setCompletionSize(int completionSize) {
924        this.completionSize = completionSize;
925    }
926
927    public Expression getCompletionSizeExpression() {
928        return completionSizeExpression;
929    }
930
931    public void setCompletionSizeExpression(Expression completionSizeExpression) {
932        this.completionSizeExpression = completionSizeExpression;
933    }
934
935    public boolean isIgnoreInvalidCorrelationKeys() {
936        return ignoreInvalidCorrelationKeys;
937    }
938
939    public void setIgnoreInvalidCorrelationKeys(boolean ignoreInvalidCorrelationKeys) {
940        this.ignoreInvalidCorrelationKeys = ignoreInvalidCorrelationKeys;
941    }
942
943    public Integer getCloseCorrelationKeyOnCompletion() {
944        return closeCorrelationKeyOnCompletion;
945    }
946
947    public void setCloseCorrelationKeyOnCompletion(Integer closeCorrelationKeyOnCompletion) {
948        this.closeCorrelationKeyOnCompletion = closeCorrelationKeyOnCompletion;
949    }
950
951    public boolean isCompletionFromBatchConsumer() {
952        return completionFromBatchConsumer;
953    }
954
955    public void setCompletionFromBatchConsumer(boolean completionFromBatchConsumer) {
956        this.completionFromBatchConsumer = completionFromBatchConsumer;
957    }
958
959    public boolean isCompletionOnNewCorrelationGroup() {
960        return completionOnNewCorrelationGroup;
961    }
962
963    public void setCompletionOnNewCorrelationGroup(boolean completionOnNewCorrelationGroup) {
964        this.completionOnNewCorrelationGroup = completionOnNewCorrelationGroup;
965    }
966
967    public boolean isCompleteAllOnStop() {
968        return completeAllOnStop;
969    }
970
971    public long getCompletionTimeoutCheckerInterval() {
972        return completionTimeoutCheckerInterval;
973    }
974
975    public void setCompletionTimeoutCheckerInterval(long completionTimeoutCheckerInterval) {
976        this.completionTimeoutCheckerInterval = completionTimeoutCheckerInterval;
977    }
978
979    public ExceptionHandler getExceptionHandler() {
980        return exceptionHandler;
981    }
982
983    public void setExceptionHandler(ExceptionHandler exceptionHandler) {
984        this.exceptionHandler = exceptionHandler;
985    }
986
987    public boolean isParallelProcessing() {
988        return parallelProcessing;
989    }
990
991    public void setParallelProcessing(boolean parallelProcessing) {
992        this.parallelProcessing = parallelProcessing;
993    }
994
995    public boolean isOptimisticLocking() {
996        return optimisticLocking;
997    }
998
999    public void setOptimisticLocking(boolean optimisticLocking) {
1000        this.optimisticLocking = optimisticLocking;
1001    }
1002
1003    public AggregationRepository getAggregationRepository() {
1004        return aggregationRepository;
1005    }
1006
1007    public void setAggregationRepository(AggregationRepository aggregationRepository) {
1008        this.aggregationRepository = aggregationRepository;
1009    }
1010
1011    public boolean isDiscardOnCompletionTimeout() {
1012        return discardOnCompletionTimeout;
1013    }
1014
1015    public void setDiscardOnCompletionTimeout(boolean discardOnCompletionTimeout) {
1016        this.discardOnCompletionTimeout = discardOnCompletionTimeout;
1017    }
1018
1019    public void setForceCompletionOnStop(boolean forceCompletionOnStop) {
1020        this.forceCompletionOnStop = forceCompletionOnStop;
1021    }
1022
1023    public void setCompleteAllOnStop(boolean completeAllOnStop) {
1024        this.completeAllOnStop = completeAllOnStop;
1025    }
1026
1027    public void setTimeoutCheckerExecutorService(ScheduledExecutorService timeoutCheckerExecutorService) {
1028        this.timeoutCheckerExecutorService = timeoutCheckerExecutorService;
1029    }
1030
1031    public ScheduledExecutorService getTimeoutCheckerExecutorService() {
1032        return timeoutCheckerExecutorService;
1033    }
1034
1035    public boolean isShutdownTimeoutCheckerExecutorService() {
1036        return shutdownTimeoutCheckerExecutorService;
1037    }
1038
1039    public void setShutdownTimeoutCheckerExecutorService(boolean shutdownTimeoutCheckerExecutorService) {
1040        this.shutdownTimeoutCheckerExecutorService = shutdownTimeoutCheckerExecutorService;
1041    }
1042
1043    public void setOptimisticLockRetryPolicy(OptimisticLockRetryPolicy optimisticLockRetryPolicy) {
1044        this.optimisticLockRetryPolicy = optimisticLockRetryPolicy;
1045    }
1046
1047    public OptimisticLockRetryPolicy getOptimisticLockRetryPolicy() {
1048        return optimisticLockRetryPolicy;
1049    }
1050
1051    public AggregationStrategy getAggregationStrategy() {
1052        return aggregationStrategy;
1053    }
1054
1055    public void setAggregationStrategy(AggregationStrategy aggregationStrategy) {
1056        this.aggregationStrategy = aggregationStrategy;
1057    }
1058
1059    public Expression getCorrelationExpression() {
1060        return correlationExpression;
1061    }
1062
1063    public void setCorrelationExpression(Expression correlationExpression) {
1064        this.correlationExpression = correlationExpression;
1065    }
1066
1067    public AggregateController getAggregateController() {
1068        return aggregateController;
1069    }
1070
1071    public void setAggregateController(AggregateController aggregateController) {
1072        this.aggregateController = aggregateController;
1073    }
1074
1075    /**
     * On completion task which keeps the bookkeeping of in-progress exchanges up to date
1077     */
1078    private final class AggregateOnCompletion implements Synchronization {
1079        private final String exchangeId;
1080
1081        private AggregateOnCompletion(String exchangeId) {
            // must use the original exchange id as it could potentially change if sent over SEDA etc.
1083            this.exchangeId = exchangeId;
1084        }
1085
1086        public void onFailure(Exchange exchange) {
1087            LOG.trace("Aggregated exchange onFailure: {}", exchange);
1088
1089            // must remember to remove in progress when we failed
1090            inProgressCompleteExchanges.remove(exchangeId);
1091            // do not remove redelivery state as we need it when we redeliver again later
1092        }
1093
1094        public void onComplete(Exchange exchange) {
1095            LOG.trace("Aggregated exchange onComplete: {}", exchange);
1096
1097            // only confirm if we processed without a problem
1098            try {
1099                aggregationRepository.confirm(exchange.getContext(), exchangeId);
1100                // and remove redelivery state as well
1101                redeliveryState.remove(exchangeId);
1102            } finally {
1103                // must remember to remove in progress when we are complete
1104                inProgressCompleteExchanges.remove(exchangeId);
1105            }
1106        }
1107
1108        @Override
1109        public String toString() {
1110            return "AggregateOnCompletion";
1111        }
1112    }
1113
1114    /**
     * Background task that completes aggregated exchanges when their completion timeout is triggered.
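     * <p/>
     * The eviction callback always returns <tt>true</tt> so the entry is removed from the timeout map,
     * even when the completion was skipped because the exchange is already in progress or was taken
     * by another node (optimistic locking).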
1116     */
1117    private final class AggregationTimeoutMap extends DefaultTimeoutMap<String, String> {
1118
1119        private AggregationTimeoutMap(ScheduledExecutorService executor, long requestMapPollTimeMillis) {
1120            // do NOT use locking on the timeout map as this aggregator has its own shared lock we will use instead
1121            super(executor, requestMapPollTimeMillis, optimisticLocking);
1122        }
1123
1124        @Override
1125        public void purge() {
1126            // must acquire the shared aggregation lock to be able to purge
1127            if (!optimisticLocking) {
1128                lock.lock();
1129            }
1130            try {
1131                super.purge();
1132            } finally {
1133                if (!optimisticLocking) {
1134                    lock.unlock();
1135                }
1136            }
1137        }
1138
1139        @Override
1140        public boolean onEviction(String key, String exchangeId) {
1141            log.debug("Completion timeout triggered for correlation key: {}", key);
1142
1143            boolean inProgress = inProgressCompleteExchanges.contains(exchangeId);
1144            if (inProgress) {
1145                LOG.trace("Aggregated exchange with id: {} is already in progress.", exchangeId);
1146                return true;
1147            }
1148
1149            // get the aggregated exchange
1150            boolean evictionStolen = false;
1151            Exchange answer = aggregationRepository.get(camelContext, key);
1152            if (answer == null) {
1153                evictionStolen = true;
1154            } else {
1155                // indicate it was completed by timeout
1156                answer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "timeout");
1157                try {
1158                    answer = onCompletion(key, answer, answer, true);
1159                    if (answer != null) {
1160                        onSubmitCompletion(key, answer);
1161                    }
1162                } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
1163                    evictionStolen = true;
1164                }
1165            }
1166
1167            if (optimisticLocking && evictionStolen) {
1168                LOG.debug("Another Camel instance has already successfully correlated or processed this timeout eviction "
1169                          + "for exchange with id: {} and correlation id: {}", exchangeId, key);
1170            }
1171            return true;
1172        }
1173    }
1174
1175    /**
1176     * Background task that triggers completion based on interval.
1177     */
1178    private final class AggregationIntervalTask implements Runnable {
1179
1180        public void run() {
1181            // only run if CamelContext has been fully started
1182            if (!camelContext.getStatus().isStarted()) {
1183                LOG.trace("Completion interval task cannot start due CamelContext({}) has not been started yet", camelContext.getName());
1184                return;
1185            }
1186
1187            LOG.trace("Starting completion interval task");
1188
1189            // trigger completion for all in the repository
1190            Set<String> keys = aggregationRepository.getKeys();
1191
1192            if (keys != null && !keys.isEmpty()) {
1193                // must acquire the shared aggregation lock to be able to trigger interval completion
1194                if (!optimisticLocking) {
1195                    lock.lock();
1196                }
1197                try {
1198                    for (String key : keys) {
1199                        boolean stolenInterval = false;
1200                        Exchange exchange = aggregationRepository.get(camelContext, key);
1201                        if (exchange == null) {
1202                            stolenInterval = true;
1203                        } else {
1204                            LOG.trace("Completion interval triggered for correlation key: {}", key);
1205                            // indicate it was completed by interval
1206                            exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "interval");
1207                            try {
1208                                Exchange answer = onCompletion(key, exchange, exchange, false);
1209                                if (answer != null) {
1210                                    onSubmitCompletion(key, answer);
1211                                }
1212                            } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
1213                                stolenInterval = true;
1214                            }
1215                        }
1216                        if (optimisticLocking && stolenInterval) {
1217                            LOG.debug("Another Camel instance has already processed this interval aggregation for exchange with correlation id: {}", key);
1218                        }
1219                    }
1220                } finally {
1221                    if (!optimisticLocking) {
1222                        lock.unlock();
1223                    }
1224                }
1225            }
1226
1227            LOG.trace("Completion interval task complete");
1228        }
1229    }
1230
1231    /**
1232     * Background task that looks for aggregated exchanges to recover.
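     * <p/>
     * Recovered exchanges are marked as redelivered; once the maximum number of redeliveries
     * configured on the repository is exceeded, the exchange is moved to the repository's
     * dead letter uri instead of being redelivered.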
1233     */
1234    private final class RecoverTask implements Runnable {
1235        private final RecoverableAggregationRepository recoverable;
1236
1237        private RecoverTask(RecoverableAggregationRepository recoverable) {
1238            this.recoverable = recoverable;
1239        }
1240
1241        public void run() {
1242            // only run if CamelContext has been fully started
1243            if (!camelContext.getStatus().isStarted()) {
1244                LOG.trace("Recover check cannot start due CamelContext({}) has not been started yet", camelContext.getName());
1245                return;
1246            }
1247
1248            LOG.trace("Starting recover check");
1249
1250            // copy the current in progress before doing scan
1251            final Set<String> copyOfInProgress = new LinkedHashSet<>(inProgressCompleteExchanges);
1252
1253            Set<String> exchangeIds = recoverable.scan(camelContext);
1254            for (String exchangeId : exchangeIds) {
1255
                // we may shut down while doing recovery
1257                if (!isRunAllowed()) {
1258                    LOG.info("We are shutting down so stop recovering");
1259                    return;
1260                }
1261                if (!optimisticLocking) {
1262                    lock.lock();
1263                }
1264                try {
                    // consider it in progress if it was in progress before we did the scan, or is currently in progress after the scan
                    // it is safer to consider it in progress than to risk duplicates from being both in progress and recovered
1267                    boolean inProgress = copyOfInProgress.contains(exchangeId) || inProgressCompleteExchanges.contains(exchangeId);
1268                    if (inProgress) {
1269                        LOG.trace("Aggregated exchange with id: {} is already in progress.", exchangeId);
1270                    } else {
1271                        LOG.debug("Loading aggregated exchange with id: {} to be recovered.", exchangeId);
1272                        Exchange exchange = recoverable.recover(camelContext, exchangeId);
1273                        if (exchange != null) {
1274                            // get the correlation key
1275                            String key = exchange.getProperty(Exchange.AGGREGATED_CORRELATION_KEY, String.class);
1276                            // and mark it as redelivered
1277                            exchange.getIn().setHeader(Exchange.REDELIVERED, Boolean.TRUE);
1278
1279                            // get the current redelivery data
1280                            RedeliveryData data = redeliveryState.get(exchange.getExchangeId());
1281
1282                            // if we are exhausted, then move to dead letter channel
1283                            if (data != null && recoverable.getMaximumRedeliveries() > 0 && data.redeliveryCounter >= recoverable.getMaximumRedeliveries()) {
                                LOG.warn("The recovered exchange is exhausted after {} attempts, will now be moved to dead letter channel: {}",
                                        recoverable.getMaximumRedeliveries(), recoverable.getDeadLetterUri());
1286
1287                                // send to DLC
1288                                try {
1289                                    // set redelivery counter
1290                                    exchange.getIn().setHeader(Exchange.REDELIVERY_COUNTER, data.redeliveryCounter);
1291                                    exchange.getIn().setHeader(Exchange.REDELIVERY_EXHAUSTED, Boolean.TRUE);
1292                                    deadLetterProducerTemplate.send(recoverable.getDeadLetterUri(), exchange);
1293                                } catch (Throwable e) {
1294                                    exchange.setException(e);
1295                                }
1296
1297                                // handle if failed
1298                                if (exchange.getException() != null) {
1299                                    getExceptionHandler().handleException("Failed to move recovered Exchange to dead letter channel: " + recoverable.getDeadLetterUri(), exchange.getException());
1300                                } else {
                                    // it was ok, so confirm it after it has been moved to the dead letter channel, so we won't recover it again
1302                                    recoverable.confirm(camelContext, exchangeId);
1303                                }
1304                            } else {
1305                                // update current redelivery state
1306                                if (data == null) {
1307                                    // create new data
1308                                    data = new RedeliveryData();
1309                                    redeliveryState.put(exchange.getExchangeId(), data);
1310                                }
1311                                data.redeliveryCounter++;
1312
1313                                // set redelivery counter
1314                                exchange.getIn().setHeader(Exchange.REDELIVERY_COUNTER, data.redeliveryCounter);
1315                                if (recoverable.getMaximumRedeliveries() > 0) {
1316                                    exchange.getIn().setHeader(Exchange.REDELIVERY_MAX_COUNTER, recoverable.getMaximumRedeliveries());
1317                                }
1318
1319                                LOG.debug("Delivery attempt: {} to recover aggregated exchange with id: {}", data.redeliveryCounter, exchangeId);
1320
                                // not exhausted, so resubmit the recovered exchange
1322                                onSubmitCompletion(key, exchange);
1323                            }
1324                        }
1325                    }
1326                } finally {
1327                    if (!optimisticLocking) {
1328                        lock.unlock();
1329                    }
1330                }
1331            }
1332
1333            LOG.trace("Recover check complete");
1334        }
1335    }
1336
1337    @Override
1338    @SuppressWarnings("unchecked")
1339    protected void doStart() throws Exception {
1340        AggregationStrategy strategy = aggregationStrategy;
1341        if (strategy instanceof DelegateAggregationStrategy) {
1342            strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
1343        }
1344        if (strategy instanceof CamelContextAware) {
1345            ((CamelContextAware) strategy).setCamelContext(camelContext);
1346        }
1347        if (strategy instanceof PreCompletionAwareAggregationStrategy) {
1348            preCompletion = true;
1349            LOG.info("PreCompletionAwareAggregationStrategy detected. Aggregator {} is in pre-completion mode.", getId());
1350        }
1351
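        // in pre-completion mode the strategy itself decides when a group completes,
        // so the completion options check below is not required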
1352        if (!preCompletion) {
            // if not in pre-completion mode then check that at least one completion option has been configured
1354            if (getCompletionTimeout() <= 0 && getCompletionInterval() <= 0 && getCompletionSize() <= 0 && getCompletionPredicate() == null
1355                    && !isCompletionFromBatchConsumer() && getCompletionTimeoutExpression() == null
1356                    && getCompletionSizeExpression() == null) {
                throw new IllegalStateException("At least one of the completion options"
                        + " [completionTimeout, completionInterval, completionSize, completionPredicate, completionFromBatchConsumer] must be set");
1359            }
1360        }
1361
1362        if (getCloseCorrelationKeyOnCompletion() != null) {
1363            if (getCloseCorrelationKeyOnCompletion() > 0) {
                LOG.info("Using ClosedCorrelationKeys with an LRUCache with a capacity of {}", getCloseCorrelationKeyOnCompletion());
1365                closedCorrelationKeys = LRUCacheFactory.newLRUCache(getCloseCorrelationKeyOnCompletion());
1366            } else {
1367                LOG.info("Using ClosedCorrelationKeys with unbounded capacity");
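                // note: with unbounded capacity the closed correlation keys are retained until this aggregator is stopped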
1368                closedCorrelationKeys = new ConcurrentHashMap<>();
1369            }
1370        }
1371
1372        if (aggregationRepository == null) {
1373            aggregationRepository = new MemoryAggregationRepository(optimisticLocking);
1374            LOG.info("Defaulting to MemoryAggregationRepository");
1375        }
1376
1377        if (optimisticLocking) {
1378            if (!(aggregationRepository instanceof OptimisticLockingAggregationRepository)) {
1379                throw new IllegalArgumentException("Optimistic locking cannot be enabled without using an AggregationRepository that implements OptimisticLockingAggregationRepository");
1380            }
1381            LOG.info("Optimistic locking is enabled");
1382        }
1383
1384        ServiceHelper.startServices(aggregationStrategy, processor, aggregationRepository);
1385
        // should we use the recovery checker
1387        if (aggregationRepository instanceof RecoverableAggregationRepository) {
1388            RecoverableAggregationRepository recoverable = (RecoverableAggregationRepository) aggregationRepository;
1389            if (recoverable.isUseRecovery()) {
1390                long interval = recoverable.getRecoveryIntervalInMillis();
1391                if (interval <= 0) {
                    throw new IllegalArgumentException("AggregationRepository has recovery enabled, so the RecoveryInterval option must be a positive number, was: " + interval);
1393                }
1394
1395                // create a background recover thread to check every interval
1396                recoverService = camelContext.getExecutorServiceManager().newScheduledThreadPool(this, "AggregateRecoverChecker", 1);
1397                Runnable recoverTask = new RecoverTask(recoverable);
1398                LOG.info("Using RecoverableAggregationRepository by scheduling recover checker to run every {} millis.", interval);
                // use fixed delay so there is the configured interval between each run
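                // the 1 second initial delay means the first scan runs shortly after startup;
                // the task itself also re-checks that the CamelContext has started before scanning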
1400                recoverService.scheduleWithFixedDelay(recoverTask, 1000L, interval, TimeUnit.MILLISECONDS);
1401
1402                if (recoverable.getDeadLetterUri() != null) {
1403                    int max = recoverable.getMaximumRedeliveries();
1404                    if (max <= 0) {
1405                        throw new IllegalArgumentException("Option maximumRedeliveries must be a positive number, was: " + max);
1406                    }
1407                    LOG.info("After {} failed redelivery attempts Exchanges will be moved to deadLetterUri: {}", max, recoverable.getDeadLetterUri());
1408
1409                    // dead letter uri must be a valid endpoint
1410                    Endpoint endpoint = camelContext.getEndpoint(recoverable.getDeadLetterUri());
1411                    if (endpoint == null) {
1412                        throw new NoSuchEndpointException(recoverable.getDeadLetterUri());
1413                    }
1414                    deadLetterProducerTemplate = camelContext.createProducerTemplate();
1415                }
1416            }
1417        }
1418
1419        if (getCompletionInterval() > 0 && getCompletionTimeout() > 0) {
1420            throw new IllegalArgumentException("Only one of completionInterval or completionTimeout can be used, not both.");
1421        }
1422        if (getCompletionInterval() > 0) {
1423            LOG.info("Using CompletionInterval to run every {} millis.", getCompletionInterval());
1424            if (getTimeoutCheckerExecutorService() == null) {
1425                setTimeoutCheckerExecutorService(camelContext.getExecutorServiceManager().newScheduledThreadPool(this, AGGREGATE_TIMEOUT_CHECKER, 1));
1426                shutdownTimeoutCheckerExecutorService = true;
1427            }
1428            // trigger completion based on interval
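            // note: this uses fixed-rate scheduling, whereas the recover task above uses a fixed delay between runs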
1429            getTimeoutCheckerExecutorService().scheduleAtFixedRate(new AggregationIntervalTask(), getCompletionInterval(), getCompletionInterval(), TimeUnit.MILLISECONDS);
1430        }
1431
        // start the timeout service if it is in use
1433        if (getCompletionTimeout() > 0 || getCompletionTimeoutExpression() != null) {
1434            LOG.info("Using CompletionTimeout to trigger after {} millis of inactivity.", getCompletionTimeout());
1435            if (getTimeoutCheckerExecutorService() == null) {
1436                setTimeoutCheckerExecutorService(camelContext.getExecutorServiceManager().newScheduledThreadPool(this, AGGREGATE_TIMEOUT_CHECKER, 1));
1437                shutdownTimeoutCheckerExecutorService = true;
1438            }
            // check for timed out aggregated messages at the configured completion timeout checker interval
1440            timeoutMap = new AggregationTimeoutMap(getTimeoutCheckerExecutorService(), getCompletionTimeoutCheckerInterval());
            // fill in existing timeout values from the aggregation repository, for example if a restart occurred,
            // so the timeout map is re-established and the timeouts can still trigger
1443            restoreTimeoutMapFromAggregationRepository();
1444            ServiceHelper.startService(timeoutMap);
1445        }
1446
1447        if (aggregateController == null) {
1448            aggregateController = new DefaultAggregateController();
1449        }
1450        aggregateController.onStart(this);
1451    }
1452
1453    @Override
1454    protected void doStop() throws Exception {
1455        // note: we cannot do doForceCompletionOnStop from this doStop method
1456        // as this is handled in the prepareShutdown method which is also invoked when stopping a route
1457        // and is better suited for preparing to shutdown than this doStop method is
1458
1459        if (aggregateController != null) {
1460            aggregateController.onStop(this);
1461        }
1462
1463        if (recoverService != null) {
1464            camelContext.getExecutorServiceManager().shutdown(recoverService);
1465        }
1466
1467        if (shutdownTimeoutCheckerExecutorService && timeoutCheckerExecutorService != null) {
1468            camelContext.getExecutorServiceManager().shutdown(timeoutCheckerExecutorService);
1469            timeoutCheckerExecutorService = null;
1470            shutdownTimeoutCheckerExecutorService = false;
1471        }
1472
1473        ServiceHelper.stopServices(timeoutMap, processor, deadLetterProducerTemplate);
1474
1475        if (closedCorrelationKeys != null) {
1476            // it may be a service so stop it as well
1477            ServiceHelper.stopService(closedCorrelationKeys);
1478            closedCorrelationKeys.clear();
1479        }
1480        batchConsumerCorrelationKeys.clear();
1481        redeliveryState.clear();
1482    }
1483
1484    @Override
1485    public void prepareShutdown(boolean suspendOnly, boolean forced) {
1486        // we are shutting down, so force completion if this option was enabled
1487        // but only do this when forced=false, as that is when we have chance to
1488        // send out new messages to be routed by Camel. When forced=true, then
1489        // we have to shutdown in a hurry
1490        if (!forced && forceCompletionOnStop) {
1491            doForceCompletionOnStop();
1492        }
1493    }
1494
1495    @Override
1496    public boolean deferShutdown(ShutdownRunningTask shutdownRunningTask) {
1497        // not in use
1498        return true;
1499    }
1500
1501    @Override
1502    public int getPendingExchangesSize() {
1503        if (completeAllOnStop) {
1504            // we want to regard all pending exchanges in the repo as inflight
1505            Set<String> keys = getAggregationRepository().getKeys();
1506            return keys != null ? keys.size() : 0;
1507        } else {
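            // when completeAllOnStop is disabled, pending aggregated exchanges are not counted as inflight,
            // so a graceful shutdown does not wait for them to complete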
1508            return 0;
1509        }
1510    }
1511
1512    private void doForceCompletionOnStop() {
1513        int expected = forceCompletionOfAllGroups();
1514
1515        StopWatch watch = new StopWatch();
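        // wait until the exchanges still marked as in progress (including the completions forced above)
        // have finished, or until we get interrupted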
1516        while (inProgressCompleteExchanges.size() > 0) {
1517            LOG.trace("Waiting for {} inflight exchanges to complete", getInProgressCompleteExchanges());
1518            try {
1519                Thread.sleep(100);
1520            } catch (InterruptedException e) {
                // break out as we got interrupted, for example when the JVM is terminating
1522                LOG.warn("Interrupted while waiting for {} inflight exchanges to complete.", getInProgressCompleteExchanges());
1523                break;
1524            }
1525        }
1526
1527        if (expected > 0) {
1528            LOG.info("Forcing completion of all groups with {} exchanges completed in {}", expected, TimeUtils.printDuration(watch.taken()));
1529        }
1530    }
1531
1532    @Override
1533    protected void doShutdown() throws Exception {
1534        // shutdown aggregation repository and the strategy
1535        ServiceHelper.stopAndShutdownServices(aggregationRepository, aggregationStrategy);
1536
1537        // cleanup when shutting down
1538        inProgressCompleteExchanges.clear();
1539
1540        if (shutdownExecutorService) {
1541            camelContext.getExecutorServiceManager().shutdownNow(executorService);
1542        }
1543        if (shutdownTimeoutCheckerExecutorService) {
1544            camelContext.getExecutorServiceManager().shutdownNow(timeoutCheckerExecutorService);
1545            timeoutCheckerExecutorService = null;
1546        }
1547
1548        super.doShutdown();
1549    }
1550
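    /**
     * Forces completion of the exchange aggregated for the given correlation key, if any.
     *
     * @param key the correlation key
     * @return the number of groups completed, either 0 or 1
     */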
1551    public int forceCompletionOfGroup(String key) {
1552        // must acquire the shared aggregation lock to be able to trigger force completion
1553        int total = 0;
1554
1555        if (!optimisticLocking) {
1556            lock.lock();
1557        }
1558        try {
1559            Exchange exchange = aggregationRepository.get(camelContext, key);
1560            if (exchange != null) {
1561                total = 1;
1562                LOG.trace("Force completion triggered for correlation key: {}", key);
1563                // indicate it was completed by a force completion request
1564                exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "force");
1565                Exchange answer = onCompletion(key, exchange, exchange, false);
1566                if (answer != null) {
1567                    onSubmitCompletion(key, answer);
1568                }
1569            }
1570        } finally {
1571            if (!optimisticLocking) {
1572                lock.unlock(); 
1573            }
1574        }
1575        LOG.trace("Completed force completion of group {}", key);
1576
1577        if (total > 0) {
1578            LOG.debug("Forcing completion of group {} with {} exchanges", key, total);
1579        }
1580        return total;
1581    }
1582
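    /**
     * Forces completion of all groups currently stored in the aggregation repository.
     *
     * @return the number of groups for which completion was triggered
     */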
1583    public int forceCompletionOfAllGroups() {
1584
1585        // only run if CamelContext has been fully started or is stopping
1586        boolean allow = camelContext.getStatus().isStarted() || camelContext.getStatus().isStopping();
1587        if (!allow) {
1588            LOG.warn("Cannot start force completion of all groups because CamelContext({}) has not been started", camelContext.getName());
1589            return 0;
1590        }
1591
1592        LOG.trace("Starting force completion of all groups task");
1593
1594        // trigger completion for all in the repository
1595        Set<String> keys = aggregationRepository.getKeys();
1596
1597        int total = 0;
1598        if (keys != null && !keys.isEmpty()) {
1599            // must acquire the shared aggregation lock to be able to trigger force completion
1600            if (!optimisticLocking) {
1601                lock.lock(); 
1602            }
1603            total = keys.size();
1604            try {
1605                for (String key : keys) {
1606                    Exchange exchange = aggregationRepository.get(camelContext, key);
1607                    if (exchange != null) {
1608                        LOG.trace("Force completion triggered for correlation key: {}", key);
1609                        // indicate it was completed by a force completion request
1610                        exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "force");
1611                        Exchange answer = onCompletion(key, exchange, exchange, false);
1612                        if (answer != null) {
1613                            onSubmitCompletion(key, answer);
1614                        }
1615                    }
1616                }
1617            } finally {
1618                if (!optimisticLocking) {
1619                    lock.unlock();
1620                }
1621            }
1622        }
1623        LOG.trace("Completed force completion of all groups task");
1624
1625        if (total > 0) {
1626            LOG.debug("Forcing completion of all groups with {} exchanges", total);
1627        }
1628        return total;
1629    }
1630
1631}