/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.processor;

import java.io.Closeable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.camel.AsyncCallback;
import org.apache.camel.AsyncProcessor;
import org.apache.camel.CamelContext;
import org.apache.camel.CamelExchangeException;
import org.apache.camel.Endpoint;
import org.apache.camel.ErrorHandlerFactory;
import org.apache.camel.Exchange;
import org.apache.camel.Navigate;
import org.apache.camel.Processor;
import org.apache.camel.Producer;
import org.apache.camel.Traceable;
import org.apache.camel.processor.aggregate.AggregationStrategy;
import org.apache.camel.processor.aggregate.CompletionAwareAggregationStrategy;
import org.apache.camel.processor.aggregate.TimeoutAwareAggregationStrategy;
import org.apache.camel.spi.RouteContext;
import org.apache.camel.spi.TracedRouteNodes;
import org.apache.camel.spi.UnitOfWork;
import org.apache.camel.support.ServiceSupport;
import org.apache.camel.util.AsyncProcessorConverterHelper;
import org.apache.camel.util.AsyncProcessorHelper;
import org.apache.camel.util.CastUtils;
import org.apache.camel.util.EventHelper;
import org.apache.camel.util.ExchangeHelper;
import org.apache.camel.util.IOHelper;
import org.apache.camel.util.KeyValueHolder;
import org.apache.camel.util.ObjectHelper;
import org.apache.camel.util.ServiceHelper;
import org.apache.camel.util.StopWatch;
import org.apache.camel.util.concurrent.AtomicException;
import org.apache.camel.util.concurrent.AtomicExchange;
import org.apache.camel.util.concurrent.SubmitOrderedCompletionService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.camel.util.ObjectHelper.notNull;

/**
 * Implements the Multicast pattern to send a message exchange to a number of
 * endpoints, each endpoint receiving a copy of the message exchange.
 *
 * @see Pipeline
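 * <p/>
 * A minimal usage sketch (not taken from the Camel distribution), assuming a started
 * {@code CamelContext} and an {@code Exchange} are already in scope; the endpoint URIs and the
 * choice of {@code UseLatestAggregationStrategy} are placeholders for illustration:
 * <pre>{@code
 * List<Processor> destinations = new ArrayList<Processor>();
 * destinations.add(camelContext.getEndpoint("direct:a").createProducer());
 * destinations.add(camelContext.getEndpoint("direct:b").createProducer());
 *
 * MulticastProcessor multicast = new MulticastProcessor(
 *         camelContext, destinations, new UseLatestAggregationStrategy());
 * ServiceHelper.startServices(multicast);
 * multicast.process(exchange);
 * }</pre>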
 */
public class MulticastProcessor extends ServiceSupport implements AsyncProcessor, Navigate<Processor>, Traceable {

    private static final Logger LOG = LoggerFactory.getLogger(MulticastProcessor.class);

    /**
     * Class that represents each step in the multicast route to process.
     */
    static final class DefaultProcessorExchangePair implements ProcessorExchangePair {
        private final int index;
        private final Processor processor;
        private final Processor prepared;
        private final Exchange exchange;

        private DefaultProcessorExchangePair(int index, Processor processor, Processor prepared, Exchange exchange) {
            this.index = index;
            this.processor = processor;
            this.prepared = prepared;
            this.exchange = exchange;
        }

        public int getIndex() {
            return index;
        }

        public Exchange getExchange() {
            return exchange;
        }

        public Producer getProducer() {
            if (processor instanceof Producer) {
                return (Producer) processor;
            }
            return null;
        }

        public Processor getProcessor() {
            return prepared;
        }

        public void begin() {
            // noop
        }

        public void done() {
            // noop
        }

    }

    /**
     * Class that represents the prepared fine-grained error handlers when processing multicasted/split exchanges.
     * <p/>
     * See the <tt>createProcessorExchangePair</tt> and <tt>createErrorHandler</tt> methods.
     */
    static final class PreparedErrorHandler extends KeyValueHolder<RouteContext, Processor> {

        public PreparedErrorHandler(RouteContext key, Processor value) {
            super(key, value);
        }

    }

    protected final Processor onPrepare;
    private final CamelContext camelContext;
    private Collection<Processor> processors;
    private final AggregationStrategy aggregationStrategy;
    private final boolean parallelProcessing;
    private final boolean streaming;
    private final boolean parallelAggregate;
    private final boolean stopOnException;
    private final ExecutorService executorService;
    private final boolean shutdownExecutorService;
    private ExecutorService aggregateExecutorService;
    private final long timeout;
    private final ConcurrentMap<PreparedErrorHandler, Processor> errorHandlers = new ConcurrentHashMap<PreparedErrorHandler, Processor>();
    private final boolean shareUnitOfWork;

    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors) {
        this(camelContext, processors, null);
    }

    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy) {
        this(camelContext, processors, aggregationStrategy, false, null, false, false, false, 0, null, false, false);
    }

    @Deprecated
    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy,
                              boolean parallelProcessing, ExecutorService executorService, boolean shutdownExecutorService,
                              boolean streaming, boolean stopOnException, long timeout, Processor onPrepare, boolean shareUnitOfWork) {
        this(camelContext, processors, aggregationStrategy, parallelProcessing, executorService, shutdownExecutorService,
                streaming, stopOnException, timeout, onPrepare, shareUnitOfWork, false);
    }

    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy,
                              boolean parallelProcessing, ExecutorService executorService, boolean shutdownExecutorService, boolean streaming,
                              boolean stopOnException, long timeout, Processor onPrepare, boolean shareUnitOfWork,
                              boolean parallelAggregate) {
        notNull(camelContext, "camelContext");
        this.camelContext = camelContext;
        this.processors = processors;
        this.aggregationStrategy = aggregationStrategy;
        this.executorService = executorService;
        this.shutdownExecutorService = shutdownExecutorService;
        this.streaming = streaming;
        this.stopOnException = stopOnException;
        // must enable parallel if an executor service is provided
        this.parallelProcessing = parallelProcessing || executorService != null;
        this.timeout = timeout;
        this.onPrepare = onPrepare;
        this.shareUnitOfWork = shareUnitOfWork;
        this.parallelAggregate = parallelAggregate;
    }

    @Override
    public String toString() {
        return "Multicast[" + getProcessors() + "]";
    }

    public String getTraceLabel() {
        return "multicast";
    }

    public CamelContext getCamelContext() {
        return camelContext;
    }

    public void process(Exchange exchange) throws Exception {
        AsyncProcessorHelper.process(this, exchange);
    }

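    /**
     * Processes the exchange by multicasting it to all configured processors, either sequentially
     * or in parallel, and aggregating the replies if an {@link AggregationStrategy} is configured.
     *
     * @return <tt>true</tt> if the processing completed synchronously, or <tt>false</tt>
     *         if it continues asynchronously and the callback will be invoked later
     */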
    public boolean process(Exchange exchange, AsyncCallback callback) {
        final AtomicExchange result = new AtomicExchange();
        Iterable<ProcessorExchangePair> pairs = null;

        try {
            boolean sync = true;

            pairs = createProcessorExchangePairs(exchange);

            if (isParallelProcessing()) {
                // ensure an executor is set when running in parallel
                ObjectHelper.notNull(executorService, "executorService", this);
                doProcessParallel(exchange, result, pairs, isStreaming(), callback);
            } else {
                sync = doProcessSequential(exchange, result, pairs, callback);
            }

            if (!sync) {
                // the remainder of the multicast will be completed asynchronously
                // so we break out now, and the callback will be invoked and continue routing from where we left off
                return false;
            }
        } catch (Throwable e) {
            exchange.setException(e);
            // an unexpected exception was thrown, maybe from the iterator etc., so do not regard it as exhausted
            // and do the done work
            doDone(exchange, null, pairs, callback, true, false);
            return true;
        }

        // multicasting was processed successfully
        // and do the done work
        Exchange subExchange = result.get();
        doDone(exchange, subExchange, pairs, callback, true, true);
        return true;
    }

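    /**
     * Processes the exchange pairs in parallel using the configured {@link ExecutorService}, while a
     * separate aggregation task aggregates the replies on-the-fly as they complete.
     *
     * @param original  the original exchange
     * @param result    the cumulative result of the aggregation
     * @param pairs     the pairs with the exchanges to process
     * @param streaming whether to aggregate in completion order (streaming) or in submission order
     * @param callback  the callback to invoke when the multicast is done
     * @throws Exception is thrown if an error occurred during parallel processing
     */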
    protected void doProcessParallel(final Exchange original, final AtomicExchange result, final Iterable<ProcessorExchangePair> pairs,
                                     final boolean streaming, final AsyncCallback callback) throws Exception {

        ObjectHelper.notNull(executorService, "ExecutorService", this);
        ObjectHelper.notNull(aggregateExecutorService, "AggregateExecutorService", this);

        final CompletionService<Exchange> completion;
        if (streaming) {
            // execute tasks in parallel+streaming and aggregate in the order they are finished (out of order sequence)
            completion = new ExecutorCompletionService<Exchange>(executorService);
        } else {
            // execute tasks in parallel and aggregate in the order the tasks are submitted (in order sequence)
            completion = new SubmitOrderedCompletionService<Exchange>(executorService);
        }

        final AtomicInteger total = new AtomicInteger(0);
        final Iterator<ProcessorExchangePair> it = pairs.iterator();

        if (it.hasNext()) {
            // when parallel then aggregate on the fly
            final AtomicBoolean running = new AtomicBoolean(true);
            final AtomicBoolean allTasksSubmitted = new AtomicBoolean();
            final CountDownLatch aggregationOnTheFlyDone = new CountDownLatch(1);
            final AtomicException executionException = new AtomicException();

            // issue a task to execute in a separate thread so it can aggregate on-the-fly
            // while we submit new tasks, and those tasks complete concurrently
            // this allows us to optimize work and reduce memory consumption
            final AggregateOnTheFlyTask aggregateOnTheFlyTask = new AggregateOnTheFlyTask(result, original, total, completion, running,
                    aggregationOnTheFlyDone, allTasksSubmitted, executionException);
            final AtomicBoolean aggregationTaskSubmitted = new AtomicBoolean();

            LOG.trace("Starting to submit parallel tasks");

            while (it.hasNext()) {
                final ProcessorExchangePair pair = it.next();
                final Exchange subExchange = pair.getExchange();
                updateNewExchange(subExchange, total.intValue(), pairs, it);

                completion.submit(new Callable<Exchange>() {
                    public Exchange call() throws Exception {
                        // only start the aggregation task when this task is being executed, to avoid starting
                        // the aggregation task too early and piling up too many threads
                        if (aggregationTaskSubmitted.compareAndSet(false, true)) {
                            // but only submit the task once
                            aggregateExecutorService.submit(aggregateOnTheFlyTask);
                        }

                        if (!running.get()) {
                            // do not start processing the task if we are not running
                            return subExchange;
                        }

                        try {
                            doProcessParallel(pair);
                        } catch (Throwable e) {
                            subExchange.setException(e);
                        }

                        // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                        Integer number = getExchangeIndex(subExchange);
                        boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, LOG);
                        if (stopOnException && !continueProcessing) {
                            // signal to stop running
                            running.set(false);
                            // throw caused exception
                            if (subExchange.getException() != null) {
                                // wrap in exception to explain where it failed
                                CamelExchangeException cause = new CamelExchangeException("Parallel processing failed for number " + number, subExchange, subExchange.getException());
                                subExchange.setException(cause);
                            }
                        }

                        LOG.trace("Parallel processing complete for exchange: {}", subExchange);
                        return subExchange;
                    }
                });

                total.incrementAndGet();
            }

            // signal that all tasks have been submitted
            LOG.trace("Signaling that all {} tasks have been submitted.", total.get());
            allTasksSubmitted.set(true);

            // it is too hard to do parallel async routing, so we let the caller thread be synchronous
            // and have it pick up the replies and do the aggregation (eg we use a latch to wait)
            // wait for aggregation to be done
            LOG.debug("Waiting for on-the-fly aggregation to complete aggregating {} responses for exchangeId: {}", total.get(), original.getExchangeId());
            aggregationOnTheFlyDone.await();

            // did we fail for whatever reason, if so throw the caused exception
            if (executionException.get() != null) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Parallel processing failed due to {}", executionException.get().getMessage());
                }
                throw executionException.get();
            }
        }

        // now everything is okay so we are done
        LOG.debug("Done parallel processing {} exchanges", total);
    }

    /**
     * Task to aggregate on-the-fly for completed tasks when using parallel processing.
     * <p/>
     * This ensures lower memory consumption as we do not need to keep all completed tasks in memory
     * before we perform aggregation. Instead this separate thread will run and aggregate as new
     * tasks complete.
     * <p/>
     * The logic is fairly complex as this implementation has to keep track of how far it has got, and also
     * signal back to the <i>main</i> thread when it is done, so the <i>main</i> thread can continue
     * processing when the entire splitting is done.
     */
    private final class AggregateOnTheFlyTask implements Runnable {

        private final AtomicExchange result;
        private final Exchange original;
        private final AtomicInteger total;
        private final CompletionService<Exchange> completion;
        private final AtomicBoolean running;
        private final CountDownLatch aggregationOnTheFlyDone;
        private final AtomicBoolean allTasksSubmitted;
        private final AtomicException executionException;

        private AggregateOnTheFlyTask(AtomicExchange result, Exchange original, AtomicInteger total,
                                      CompletionService<Exchange> completion, AtomicBoolean running,
                                      CountDownLatch aggregationOnTheFlyDone, AtomicBoolean allTasksSubmitted,
                                      AtomicException executionException) {
            this.result = result;
            this.original = original;
            this.total = total;
            this.completion = completion;
            this.running = running;
            this.aggregationOnTheFlyDone = aggregationOnTheFlyDone;
            this.allTasksSubmitted = allTasksSubmitted;
            this.executionException = executionException;
        }

        public void run() {
            LOG.trace("Aggregate on the fly task started for exchangeId: {}", original.getExchangeId());

            try {
                aggregateOnTheFly();
            } catch (Throwable e) {
                if (e instanceof Exception) {
                    executionException.set((Exception) e);
                } else {
                    executionException.set(ObjectHelper.wrapRuntimeCamelException(e));
                }
            } finally {
                // must signal we are done so the latch can open and let the other thread continue processing
                LOG.debug("Signaling we are done aggregating on the fly for exchangeId: {}", original.getExchangeId());
                LOG.trace("Aggregate on the fly task done for exchangeId: {}", original.getExchangeId());
                aggregationOnTheFlyDone.countDown();
            }
        }

        private void aggregateOnTheFly() throws InterruptedException, ExecutionException {
            boolean timedOut = false;
            boolean stoppedOnException = false;
            final StopWatch watch = new StopWatch();
            int aggregated = 0;
            boolean done = false;
            // not a for loop as tasks may still be submitted while we aggregate on the fly
            while (!done) {
                // check if we have already aggregated everything
                if (allTasksSubmitted.get() && aggregated >= total.get()) {
                    LOG.debug("Done aggregating {} exchanges on the fly.", aggregated);
                    break;
                }

                Future<Exchange> future;
                if (timedOut) {
                    // we have timed out, but try to grab any tasks that have already been completed
                    // poll will return null if no task is present
                    future = completion.poll();
                    LOG.trace("Polled completion task #{} after timeout to grab already completed tasks: {}", aggregated, future);
                } else if (timeout > 0) {
                    long left = timeout - watch.taken();
                    if (left < 0) {
                        left = 0;
                    }
                    LOG.trace("Polling completion task #{} using timeout {} millis.", aggregated, left);
                    future = completion.poll(left, TimeUnit.MILLISECONDS);
                } else {
                    LOG.trace("Polling completion task #{}", aggregated);
                    // we must not block so poll every second
                    future = completion.poll(1, TimeUnit.SECONDS);
                    if (future == null) {
                        // and continue loop which will recheck if we are done
                        continue;
                    }
                }

                if (future == null) {
                    // timeout occurred
                    AggregationStrategy strategy = getAggregationStrategy(null);
                    if (strategy instanceof TimeoutAwareAggregationStrategy) {
                        // notify the strategy we timed out
                        Exchange oldExchange = result.get();
                        if (oldExchange == null) {
                            // if they all timed out the result may not have been set yet, so use the original exchange
                            oldExchange = original;
                        }
                        ((TimeoutAwareAggregationStrategy) strategy).timeout(oldExchange, aggregated, total.intValue(), timeout);
                    } else {
                        // log a WARN we timed out since it will not be aggregated and the Exchange will be lost
                        LOG.warn("Parallel processing timed out after {} millis for number {}. This task will be cancelled and will not be aggregated.", timeout, aggregated);
                    }
                    LOG.debug("Timeout occurred after {} millis for number {} task.", timeout, aggregated);
                    timedOut = true;

                    // mark that index as timed out, which allows us to try to retrieve
                    // any already completed tasks in the next loop
                    if (completion instanceof SubmitOrderedCompletionService) {
                        ((SubmitOrderedCompletionService<?>) completion).timeoutTask();
                    }
                } else {
                    // there is a result to aggregate
                    Exchange subExchange = future.get();

                    // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                    Integer number = getExchangeIndex(subExchange);
                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, LOG);
                    if (stopOnException && !continueProcessing) {
                        // we want to stop on exception and an exception or failure occurred
                        // this is similar to what the pipeline does, so we should do the same to not surprise end users
                        // so we should set the failed exchange as the result and break out
                        result.set(subExchange);
                        stoppedOnException = true;
                        break;
                    }

                    // we got a result so aggregate it
                    if (parallelAggregate) {
                        doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
                    } else {
                        doAggregate(getAggregationStrategy(subExchange), result, subExchange);
                    }
                }

                aggregated++;
            }

            if (timedOut || stoppedOnException) {
                if (timedOut) {
                    LOG.debug("Cancelling tasks due to timeout after {} millis.", timeout);
                }
                if (stoppedOnException) {
                    LOG.debug("Cancelling tasks due to stopOnException.");
                }
                // cancel tasks as we timed out (it is safe to cancel done tasks)
                running.set(false);
            }
        }
    }

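    /**
     * Processes the exchange pairs one by one on the caller thread, aggregating each reply
     * before continuing with the next pair.
     *
     * @param original the original exchange
     * @param result   the cumulative result of the aggregation
     * @param pairs    the pairs with the exchanges to process
     * @param callback the callback to invoke when the multicast is done
     * @return <tt>true</tt> if processing completed synchronously, or <tt>false</tt> if it continues asynchronously
     * @throws Exception is thrown if an error occurred during sequential processing
     */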
    protected boolean doProcessSequential(Exchange original, AtomicExchange result, Iterable<ProcessorExchangePair> pairs, AsyncCallback callback) throws Exception {
        AtomicInteger total = new AtomicInteger();
        Iterator<ProcessorExchangePair> it = pairs.iterator();

        while (it.hasNext()) {
            ProcessorExchangePair pair = it.next();
            Exchange subExchange = pair.getExchange();
            updateNewExchange(subExchange, total.get(), pairs, it);

            boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);
            if (!sync) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Processing exchangeId: {} continues to be processed asynchronously", pair.getExchange().getExchangeId());
                }
                // the remainder of the multicast will be completed asynchronously
                // so we break out now, and the callback will be invoked and continue routing from where we left off
                return false;
            }

            if (LOG.isTraceEnabled()) {
                LOG.trace("Processing exchangeId: {} continues to be processed synchronously", pair.getExchange().getExchangeId());
            }

            // Decide whether to continue with the multicast or not; similar logic to the Pipeline
            // remember to test for stop on exception and aggregate before copying back results
            boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
            if (stopOnException && !continueProcessing) {
                if (subExchange.getException() != null) {
                    // wrap in exception to explain where it failed
                    CamelExchangeException cause = new CamelExchangeException("Sequential processing failed for number " + total.get(), subExchange, subExchange.getException());
                    subExchange.setException(cause);
                }
                // we want to stop on exception, and the exception was handled by the error handler
                // this is similar to what the pipeline does, so we should do the same to not surprise end users
                // so we should set the failed exchange as the result and be done
                result.set(subExchange);
                return true;
            }

            LOG.trace("Sequential processing complete for number {} exchange: {}", total, subExchange);

            if (parallelAggregate) {
                doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
            } else {
                doAggregate(getAggregationStrategy(subExchange), result, subExchange);
            }

            total.incrementAndGet();
        }

        LOG.debug("Done sequential processing {} exchanges", total);

        return true;
    }

    private boolean doProcessSequential(final Exchange original, final AtomicExchange result,
                                        final Iterable<ProcessorExchangePair> pairs, final Iterator<ProcessorExchangePair> it,
                                        final ProcessorExchangePair pair, final AsyncCallback callback, final AtomicInteger total) {
        boolean sync = true;

        final Exchange exchange = pair.getExchange();
        Processor processor = pair.getProcessor();
        final Producer producer = pair.getProducer();

        TracedRouteNodes traced = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getTracedRouteNodes() : null;

        // compute time taken if sending to another endpoint
        final StopWatch watch = producer != null ? new StopWatch() : null;

        try {
            // prepare tracing starting from a new block
            if (traced != null) {
                traced.pushBlock();
            }

            if (producer != null) {
                EventHelper.notifyExchangeSending(exchange.getContext(), exchange, producer.getEndpoint());
            }
            // let the prepared processor process it, remember to begin the exchange pair
            AsyncProcessor async = AsyncProcessorConverterHelper.convert(processor);
            pair.begin();
            sync = async.process(exchange, new AsyncCallback() {
                public void done(boolean doneSync) {
                    // we are done with the exchange pair
                    pair.done();

                    // okay we are done, so notify the exchange was sent
                    if (producer != null) {
                        long timeTaken = watch.stop();
                        Endpoint endpoint = producer.getEndpoint();
                        // emit event that the exchange was sent to the endpoint
                        EventHelper.notifyExchangeSent(exchange.getContext(), exchange, endpoint, timeTaken);
                    }

                    // we only have to handle async completion of the multicast
                    if (doneSync) {
                        return;
                    }

                    // continue processing the multicast asynchronously
                    Exchange subExchange = exchange;

                    // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                    // remember to test for stop on exception and aggregate before copying back results
                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
                    if (stopOnException && !continueProcessing) {
                        if (subExchange.getException() != null) {
                            // wrap in exception to explain where it failed
                            subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, subExchange.getException()));
                        } else {
                            // we want to stop on exception, and the exception was handled by the error handler
                            // this is similar to what the pipeline does, so we should do the same to not surprise end users
                            // so we should set the failed exchange as the result and be done
                            result.set(subExchange);
                        }
                        // and do the done work
                        doDone(original, subExchange, pairs, callback, false, true);
                        return;
                    }

                    try {
                        if (parallelAggregate) {
                            doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
                        } else {
                            doAggregate(getAggregationStrategy(subExchange), result, subExchange);
                        }
                    } catch (Throwable e) {
                        // wrap in exception to explain where it failed
                        subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, e));
                        // and do the done work
                        doDone(original, subExchange, pairs, callback, false, true);
                        return;
                    }

                    total.incrementAndGet();

                    // maybe there are more processors to multicast
                    while (it.hasNext()) {

                        // prepare and run the next
                        ProcessorExchangePair pair = it.next();
                        subExchange = pair.getExchange();
                        updateNewExchange(subExchange, total.get(), pairs, it);
                        boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);

                        if (!sync) {
                            LOG.trace("Processing exchangeId: {} continues to be processed asynchronously", original.getExchangeId());
                            return;
                        }

                        // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                        // remember to test for stop on exception and aggregate before copying back results
                        continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
                        if (stopOnException && !continueProcessing) {
                            if (subExchange.getException() != null) {
                                // wrap in exception to explain where it failed
                                subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, subExchange.getException()));
                            } else {
                                // we want to stop on exception, and the exception was handled by the error handler
                                // this is similar to what the pipeline does, so we should do the same to not surprise end users
                                // so we should set the failed exchange as the result and be done
                                result.set(subExchange);
                            }
                            // and do the done work
                            doDone(original, subExchange, pairs, callback, false, true);
                            return;
                        }

                        // must catch any exceptions from aggregation
                        try {
                            if (parallelAggregate) {
                                doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
                            } else {
                                doAggregate(getAggregationStrategy(subExchange), result, subExchange);
                            }
                        } catch (Throwable e) {
                            // wrap in exception to explain where it failed
                            subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, e));
                            // and do the done work
                            doDone(original, subExchange, pairs, callback, false, true);
                            return;
                        }

                        total.incrementAndGet();
                    }

                    // do the done work
                    subExchange = result.get();
                    doDone(original, subExchange, pairs, callback, false, true);
                }
            });
        } finally {
            // pop the block so by next round we have the same starting point and thus the tracing looks accurate
            if (traced != null) {
                traced.popBlock();
            }
        }

        return sync;
    }

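    /**
     * Processes a single exchange pair synchronously; used by the parallel tasks as parallel
     * asynchronous routing is not supported.
     *
     * @param pair the pair with the exchange and processor to process
     * @throws Exception is thrown if an error occurred during processing
     */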
    private void doProcessParallel(final ProcessorExchangePair pair) throws Exception {
        final Exchange exchange = pair.getExchange();
        Processor processor = pair.getProcessor();
        Producer producer = pair.getProducer();

        TracedRouteNodes traced = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getTracedRouteNodes() : null;

        // compute time taken if sending to another endpoint
        StopWatch watch = null;
        if (producer != null) {
            watch = new StopWatch();
        }

        try {
            // prepare tracing starting from a new block
            if (traced != null) {
                traced.pushBlock();
            }

            if (producer != null) {
                EventHelper.notifyExchangeSending(exchange.getContext(), exchange, producer.getEndpoint());
            }
            // let the prepared processor process it, remember to begin the exchange pair
            AsyncProcessor async = AsyncProcessorConverterHelper.convert(processor);
            pair.begin();
            // we invoke it synchronously as parallel async routing is too hard
            AsyncProcessorHelper.process(async, exchange);
        } finally {
            pair.done();
            // pop the block so by next round we have the same starting point and thus the tracing looks accurate
            if (traced != null) {
                traced.popBlock();
            }
            if (producer != null) {
                long timeTaken = watch.stop();
                Endpoint endpoint = producer.getEndpoint();
                // emit event that the exchange was sent to the endpoint
                // this is okay to do here in the finally block, as the processing is not using the async routing engine
                // (we invoke it synchronously as parallel async routing is too hard)
                EventHelper.notifyExchangeSent(exchange.getContext(), exchange, endpoint, timeTaken);
            }
        }
    }

    /**
     * Common work which must be done when we are done multicasting.
     * <p/>
     * This logic applies both when running synchronously and asynchronously, as there are multiple exit points
     * when using the asynchronous routing engine. And therefore we want the logic in one method instead
     * of being scattered.
     *
     * @param original     the original exchange
     * @param subExchange  the current sub exchange, can be <tt>null</tt> for the synchronous part
     * @param pairs        the pairs with the exchanges to process
     * @param callback     the callback
     * @param doneSync     the <tt>doneSync</tt> parameter to call on callback
     * @param forceExhaust whether or not error handling is exhausted
     */
    protected void doDone(Exchange original, Exchange subExchange, final Iterable<ProcessorExchangePair> pairs,
                          AsyncCallback callback, boolean doneSync, boolean forceExhaust) {

        // we are done so close the pairs iterator
        if (pairs != null && pairs instanceof Closeable) {
            IOHelper.close((Closeable) pairs, "pairs", LOG);
        }

        AggregationStrategy strategy = getAggregationStrategy(subExchange);
        // invoke the on completion callback
        if (strategy instanceof CompletionAwareAggregationStrategy) {
            ((CompletionAwareAggregationStrategy) strategy).onCompletion(subExchange);
        }

        // cleanup any per exchange aggregation strategy
        removeAggregationStrategyFromExchange(original);

        // we need to know if there was an exception, and if the stopOnException option was enabled
        // also we would need to know if any error handler has attempted redelivery and exhausted
        boolean stoppedOnException = false;
        boolean exception = false;
        boolean exhaust = forceExhaust || subExchange != null && (subExchange.getException() != null || ExchangeHelper.isRedeliveryExhausted(subExchange));
        if (original.getException() != null || subExchange != null && subExchange.getException() != null) {
            // there was an exception and we stopped
            stoppedOnException = isStopOnException();
            exception = true;
        }

        // must copy results at this point
        if (subExchange != null) {
            if (stoppedOnException) {
                // if we stopped due to an exception then only propagate the exception
                original.setException(subExchange.getException());
            } else {
                // copy the current result to the original so it will contain the result of this EIP
                ExchangeHelper.copyResults(original, subExchange);
            }
        }

        // .. and then if there was an exception we need to configure the redelivery exhaust
        // for example the noErrorHandler will not cause redelivery exhaust, so if this error
        // handler has been in use, then the exhaust would be false (if not forced)
        if (exception) {
            // multicast uses error handling on its output processors and they have tried to redeliver
            // so we shall signal back to the other error handlers that we are exhausted and they should not
            // also try to redeliver as we will then do that twice
            original.setProperty(Exchange.REDELIVERY_EXHAUSTED, exhaust);
        }

        callback.done(doneSync);
    }

    /**
     * Aggregate the {@link Exchange} with the current result.
     * This method is synchronized and is called directly when parallelAggregate is disabled (the default).
     *
     * @param strategy the aggregation strategy to use
     * @param result   the current result
     * @param exchange the exchange to be added to the result
     * @see #doAggregateInternal(org.apache.camel.processor.aggregate.AggregationStrategy, org.apache.camel.util.concurrent.AtomicExchange, org.apache.camel.Exchange)
     */
    protected synchronized void doAggregate(AggregationStrategy strategy, AtomicExchange result, Exchange exchange) {
        doAggregateInternal(strategy, result, exchange);
    }

    /**
     * Aggregate the {@link Exchange} with the current result.
     * This method is not synchronized and is called directly when parallelAggregate is enabled.
     * In all other cases, this method is called from doAggregate, which is synchronized.
     *
     * @param strategy the aggregation strategy to use
     * @param result   the current result
     * @param exchange the exchange to be added to the result
     * @see #doAggregate(org.apache.camel.processor.aggregate.AggregationStrategy, org.apache.camel.util.concurrent.AtomicExchange, org.apache.camel.Exchange)
     */
    protected void doAggregateInternal(AggregationStrategy strategy, AtomicExchange result, Exchange exchange) {
        if (strategy != null) {
            // prepare the exchanges for aggregation
            Exchange oldExchange = result.get();
            ExchangeHelper.prepareAggregation(oldExchange, exchange);
            result.set(strategy.aggregate(oldExchange, exchange));
        }
    }

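    /**
     * Prepares the copied exchange before it is processed, by setting the
     * {@link Exchange#MULTICAST_INDEX} property to the current index and the
     * {@link Exchange#MULTICAST_COMPLETE} property to indicate whether this is the last copy.
     *
     * @param exchange the copied exchange about to be processed
     * @param index    the index of the copy
     * @param allPairs all the pairs being multicasted
     * @param it       the iterator over the remaining pairs
     */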
    protected void updateNewExchange(Exchange exchange, int index, Iterable<ProcessorExchangePair> allPairs,
                                     Iterator<ProcessorExchangePair> it) {
        exchange.setProperty(Exchange.MULTICAST_INDEX, index);
        if (it.hasNext()) {
            exchange.setProperty(Exchange.MULTICAST_COMPLETE, Boolean.FALSE);
        } else {
            exchange.setProperty(Exchange.MULTICAST_COMPLETE, Boolean.TRUE);
        }
    }

    protected Integer getExchangeIndex(Exchange exchange) {
        return exchange.getProperty(Exchange.MULTICAST_INDEX, Integer.class);
    }

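    /**
     * Creates the {@link ProcessorExchangePair}s to process, by creating a correlated copy of the
     * exchange for each processor and preparing it (stream cache unit of work, shared unit of work,
     * error handling, and the optional onPrepare processor).
     *
     * @param exchange the original exchange
     * @return the pairs to process
     * @throws Exception is thrown if an error occurred while creating the pairs
     */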
    protected Iterable<ProcessorExchangePair> createProcessorExchangePairs(Exchange exchange) throws Exception {
        List<ProcessorExchangePair> result = new ArrayList<ProcessorExchangePair>(processors.size());

        int index = 0;
        for (Processor processor : processors) {
            // copy exchange, and do not share the unit of work
            Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);

            // If the multicast processor has an aggregation strategy
            // then the StreamCache created by the child routes must not be
            // closed by the unit of work of the child route, but by the unit of
            // work of the parent route (or an ancestor route in case of nesting).
            // Therefore set the unit of work of the parent route as the stream cache unit of work,
            // if it is not already set.
            if (copy.getProperty(Exchange.STREAM_CACHE_UNIT_OF_WORK) == null) {
                copy.setProperty(Exchange.STREAM_CACHE_UNIT_OF_WORK, exchange.getUnitOfWork());
            }
            // if we share the unit of work, we need to prepare the child exchange
            if (isShareUnitOfWork()) {
                prepareSharedUnitOfWork(copy, exchange);
            }

            // and add the pair
            RouteContext routeContext = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getRouteContext() : null;
            result.add(createProcessorExchangePair(index++, processor, copy, routeContext));
        }

        if (exchange.getException() != null) {
            // force any exceptions that occurred during creation of the exchange pairs to be thrown
            // before returning the answer
            throw exchange.getException();
        }

        return result;
    }

    /**
     * Creates the {@link ProcessorExchangePair} which holds the processor and exchange to be sent out.
     * <p/>
     * You <b>must</b> use this method to create the instances of {@link ProcessorExchangePair} as they
     * need to be specially prepared before use.
     *
     * @param index        the index
     * @param processor    the processor
     * @param exchange     the exchange
     * @param routeContext the route context
     * @return prepared for use
     */
    protected ProcessorExchangePair createProcessorExchangePair(int index, Processor processor, Exchange exchange,
                                                                RouteContext routeContext) {
        Processor prepared = processor;

        // set property which endpoint we send to
        setToEndpoint(exchange, prepared);

        // rework error handling to support fine grained error handling
        prepared = createErrorHandler(routeContext, exchange, prepared);

        // invoke on prepare on the exchange if specified
        if (onPrepare != null) {
            try {
                onPrepare.process(exchange);
            } catch (Exception e) {
                exchange.setException(e);
            }
        }
        return new DefaultProcessorExchangePair(index, processor, prepared, exchange);
    }

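    /**
     * Wraps the given processor in an error handler (when applicable) and a unit of work processor,
     * so each multicasted exchange has fine-grained error handling on the output side.
     * Created error handlers are cached and reused to preserve memory.
     *
     * @param routeContext the route context, may be <tt>null</tt>
     * @param exchange     the exchange to be processed
     * @param processor    the processor to wrap
     * @return the wrapped processor, prepared for use
     */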
    protected Processor createErrorHandler(RouteContext routeContext, Exchange exchange, Processor processor) {
        Processor answer;

        boolean tryBlock = exchange.getProperty(Exchange.TRY_ROUTE_BLOCK, false, boolean.class);

        // do not wrap in error handler if we are inside a try block
        if (!tryBlock && routeContext != null) {
            // wrap the producer in an error handler so we have fine grained error handling on
            // the output side instead of the input side
            // this is needed to support redelivery on that output alone, and not redelivering
            // the entire multicast block, which would start from scratch again

            // create key for cache
            final PreparedErrorHandler key = new PreparedErrorHandler(routeContext, processor);

            // lookup cached first to reuse and preserve memory
            answer = errorHandlers.get(key);
            if (answer != null) {
                LOG.trace("Using existing error handler for: {}", processor);
                return answer;
            }

            LOG.trace("Creating error handler for: {}", processor);
            ErrorHandlerFactory builder = routeContext.getRoute().getErrorHandlerBuilder();
            // create error handler (create error handler directly to keep it light weight,
            // instead of using ProcessorDefinition.wrapInErrorHandler)
            try {
                processor = builder.createErrorHandler(routeContext, processor);

                // and wrap in unit of work processor so the copied exchange can also run under a UoW
                answer = createUnitOfWorkProcessor(routeContext, processor, exchange);

                boolean child = exchange.getProperty(Exchange.PARENT_UNIT_OF_WORK, UnitOfWork.class) != null;

                // must start the error handler
                ServiceHelper.startServices(answer);

                // here we don't cache the child unit of work
                if (!child) {
                    // add to cache
                    errorHandlers.putIfAbsent(key, answer);
                }

            } catch (Exception e) {
                throw ObjectHelper.wrapRuntimeCamelException(e);
            }
        } else {
            // and wrap in unit of work processor so the copied exchange can also run under a UoW
            answer = createUnitOfWorkProcessor(routeContext, processor, exchange);
        }

        return answer;
    }

    /**
     * Strategy to create the unit of work to be used for the sub route
     *
     * @param routeContext the route context
     * @param processor    the processor
     * @param exchange     the exchange
     * @return the unit of work processor
     */
    protected Processor createUnitOfWorkProcessor(RouteContext routeContext, Processor processor, Exchange exchange) {
        String routeId = routeContext != null ? routeContext.getRoute().idOrCreate(routeContext.getCamelContext().getNodeIdFactory()) : null;
        CamelInternalProcessor internal = new CamelInternalProcessor(processor);

        // and wrap it in a unit of work so the UoW is on the top, so the entire route will be in the same UoW
        UnitOfWork parent = exchange.getProperty(Exchange.PARENT_UNIT_OF_WORK, UnitOfWork.class);
        if (parent != null) {
            internal.addAdvice(new CamelInternalProcessor.ChildUnitOfWorkProcessorAdvice(routeId, parent));
        } else {
            internal.addAdvice(new CamelInternalProcessor.UnitOfWorkProcessorAdvice(routeId));
        }

        // and then in route context so we can keep track of which route this is at runtime
        if (routeContext != null) {
            internal.addAdvice(new CamelInternalProcessor.RouteContextAdvice(routeContext));
        }
        return internal;
    }

    /**
     * Prepares the exchange for participating in a shared unit of work
     * <p/>
     * This ensures a child exchange can access its parent {@link UnitOfWork} when it participates
     * in a shared unit of work.
     *
     * @param childExchange  the child exchange
     * @param parentExchange the parent exchange
     */
    protected void prepareSharedUnitOfWork(Exchange childExchange, Exchange parentExchange) {
        childExchange.setProperty(Exchange.PARENT_UNIT_OF_WORK, parentExchange.getUnitOfWork());
    }

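    /**
     * Validates the configuration (an {@link ExecutorService} is required for parallel processing,
     * and a timeout can only be used with parallel processing), lazily creates the aggregate
     * executor service if needed, and starts the aggregation strategy and processors.
     */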
    protected void doStart() throws Exception {
        if (isParallelProcessing() && executorService == null) {
            throw new IllegalArgumentException("ParallelProcessing is enabled but ExecutorService has not been set");
        }
        if (timeout > 0 && !isParallelProcessing()) {
            throw new IllegalArgumentException("Timeout is used but ParallelProcessing has not been enabled");
        }
        if (isParallelProcessing() && aggregateExecutorService == null) {
            // use an unbounded thread pool so we ensure the aggregate on-the-fly task will always have a thread
            // assigned and can run as soon as it is submitted. If not, the aggregate task may not be able to run
            // and signal completion during processing, which would appear as a deadlock or slow processing
            String name = getClass().getSimpleName() + "-AggregateTask";
            aggregateExecutorService = createAggregateExecutorService(name);
        }
        ServiceHelper.startServices(aggregationStrategy, processors);
    }

    /**
     * Strategy to create the thread pool for the aggregator background task which waits for and aggregates
     * completed tasks when running in parallel mode.
     *
     * @param name  the suggested name for the background thread
     * @return the thread pool
     */
    protected synchronized ExecutorService createAggregateExecutorService(String name) {
        // use a cached thread pool so each on-the-fly task has a dedicated thread to process completions as they come in
        return camelContext.getExecutorServiceManager().newCachedThreadPool(this, name);
    }

    @Override
    protected void doStop() throws Exception {
        ServiceHelper.stopServices(processors, errorHandlers, aggregationStrategy);
    }

    @Override
    protected void doShutdown() throws Exception {
        ServiceHelper.stopAndShutdownServices(processors, errorHandlers, aggregationStrategy);
        // only clear error handlers when shutting down
        errorHandlers.clear();

        if (shutdownExecutorService && executorService != null) {
            getCamelContext().getExecutorServiceManager().shutdownNow(executorService);
        }
        if (aggregateExecutorService != null) {
            getCamelContext().getExecutorServiceManager().shutdownNow(aggregateExecutorService);
        }
    }

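    /**
     * Sets the {@link Exchange#TO_ENDPOINT} property on the exchange when the processor
     * is a {@link Producer}, so it is known which endpoint the exchange is sent to.
     *
     * @param exchange  the exchange
     * @param processor the processor, which may be a producer
     */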
    protected static void setToEndpoint(Exchange exchange, Processor processor) {
        if (processor instanceof Producer) {
            Producer producer = (Producer) processor;
            exchange.setProperty(Exchange.TO_ENDPOINT, producer.getEndpoint().getEndpointUri());
        }
    }

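    /**
     * Gets the {@link AggregationStrategy} to use for the given exchange, preferring a per-exchange
     * strategy (stored on the exchange by {@link #setAggregationStrategyOnExchange}) over the
     * globally configured strategy.
     *
     * @param exchange the exchange, may be <tt>null</tt>
     * @return the strategy to use, or <tt>null</tt> if none is configured
     */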
    protected AggregationStrategy getAggregationStrategy(Exchange exchange) {
        AggregationStrategy answer = null;

        // prefer to use per Exchange aggregation strategy over a global strategy
        if (exchange != null) {
            Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
            Map<Object, AggregationStrategy> map = CastUtils.cast(property);
            if (map != null) {
                answer = map.get(this);
            }
        }
        if (answer == null) {
            // fallback to global strategy
            answer = getAggregationStrategy();
        }
        return answer;
    }

    /**
     * Sets the given {@link org.apache.camel.processor.aggregate.AggregationStrategy} on the {@link Exchange}.
     *
     * @param exchange            the exchange
     * @param aggregationStrategy the strategy
     */
    protected void setAggregationStrategyOnExchange(Exchange exchange, AggregationStrategy aggregationStrategy) {
        Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
        Map<Object, AggregationStrategy> map = CastUtils.cast(property);
        if (map == null) {
            map = new ConcurrentHashMap<Object, AggregationStrategy>();
        } else {
            // it is not safe to use the map directly as the exchange does not have a deep copy of its properties,
            // so we just create a new copy if we need to change the map
            map = new ConcurrentHashMap<Object, AggregationStrategy>(map);
        }
        // store the strategy using this processor as the key
        // (so we can store multiple strategies on the same exchange)
        map.put(this, aggregationStrategy);
        exchange.setProperty(Exchange.AGGREGATION_STRATEGY, map);
    }

    /**
     * Removes the associated {@link org.apache.camel.processor.aggregate.AggregationStrategy} from the {@link Exchange}
     * which must be done after use.
     *
     * @param exchange the current exchange
     */
    protected void removeAggregationStrategyFromExchange(Exchange exchange) {
        Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
        Map<Object, AggregationStrategy> map = CastUtils.cast(property);
        if (map == null) {
            return;
        }
        // remove the strategy using this processor as the key
        map.remove(this);
    }

    /**
     * Is the multicast processor working in streaming mode?
     * <p/>
     * In streaming mode:
     * <ul>
     * <li>we use {@link Iterable} to ensure we can send messages as soon as the data becomes available</li>
     * <li>for parallel processing, we start aggregating responses as they are sent back to the processor;
     * this means the {@link org.apache.camel.processor.aggregate.AggregationStrategy} has to take care of handling out-of-order arrival of exchanges</li>
     * </ul>
     */
    public boolean isStreaming() {
        return streaming;
    }

    /**
     * Should the multicast processor stop processing further exchanges in case an exception occurred?
     */
    public boolean isStopOnException() {
        return stopOnException;
    }

    /**
     * Returns the producers to multicast to
     */
    public Collection<Processor> getProcessors() {
        return processors;
    }

    /**
     * An optional timeout in millis when using parallel processing
     */
    public long getTimeout() {
        return timeout;
    }

    /**
     * Use {@link #getAggregationStrategy(org.apache.camel.Exchange)} instead.
     */
    public AggregationStrategy getAggregationStrategy() {
        return aggregationStrategy;
    }

    public boolean isParallelProcessing() {
        return parallelProcessing;
    }

    public boolean isShareUnitOfWork() {
        return shareUnitOfWork;
    }

    public List<Processor> next() {
        if (!hasNext()) {
            return null;
        }
        return new ArrayList<Processor>(processors);
    }

    public boolean hasNext() {
        return processors != null && !processors.isEmpty();
    }
}