package com.nimbusds.infinispan.persistence.sql;


import java.time.Instant;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Executor;
import java.util.function.Predicate;
import javax.sql.DataSource;

import static org.jooq.impl.DSL.table;

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.codahale.metrics.health.HealthCheckRegistry;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import io.reactivex.Flowable;
import net.jcip.annotations.ThreadSafe;
import org.infinispan.commons.configuration.ConfiguredBy;
import org.infinispan.commons.persistence.Store;
import org.infinispan.marshall.core.MarshalledEntry;
import org.infinispan.marshall.core.MarshalledEntryFactory;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.PersistenceException;
import org.jooq.DSLContext;
import org.jooq.Merge;
import org.jooq.Record;
import org.jooq.SQLDialect;
import org.jooq.conf.RenderNameStyle;
import org.jooq.conf.Settings;
import org.jooq.impl.DSL;
import org.kohsuke.MetaInfServices;
import org.reactivestreams.Publisher;

import com.nimbusds.common.monitor.MonitorRegistries;
import com.nimbusds.infinispan.persistence.common.InfinispanEntry;
import com.nimbusds.infinispan.persistence.common.InfinispanStore;
import com.nimbusds.infinispan.persistence.common.query.QueryExecutor;
import com.nimbusds.infinispan.persistence.sql.config.SQLStoreConfiguration;
import com.nimbusds.infinispan.persistence.sql.query.SQLQueryExecutor;
import com.nimbusds.infinispan.persistence.sql.query.SQLQueryExecutorInitContext;


/**
 * SQL store for Infinispan 9.0+ caches and maps.
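 *
 * <p>Minimal programmatic wiring sketch (illustrative only; the
 * {@code SQLStoreConfigurationBuilder} name and the property keys shown
 * below are assumptions and may differ from the actual companion builder of
 * {@link SQLStoreConfiguration}):
 *
 * <pre>{@code
 * Properties props = new Properties();
 * props.setProperty("jdbcUrl", "jdbc:h2:mem:example"); // hypothetical values
 * props.setProperty("username", "sa");
 *
 * ConfigurationBuilder cacheConfig = new ConfigurationBuilder();
 * cacheConfig.persistence()
 *         .addStore(SQLStoreConfigurationBuilder.class) // assumed builder class
 *         .withProperties(props);
 * }</pre>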
 */
@ThreadSafe
@MetaInfServices
@ConfiguredBy(SQLStoreConfiguration.class)
@Store(shared = true)
public class SQLStore<K,V> extends InfinispanStore<K,V> {


        /**
         * The SQL store configuration.
         */
        private SQLStoreConfiguration config;


        /**
         * Enables sharing of the Hikari SQL data sources.
         */
        private static final DataSources SHARED_DATA_SOURCES = new DataSources();


        /**
         * The Hikari SQL data source (with connection pool).
         */
        private HikariDataSource dataSource;


        /**
         * The jOOQ DSL context wrapping the SQL data source.
         * See http://stackoverflow.com/a/31389342/429425
         */
        private DSLContext sql;


        /**
         * The SQL record transformer (to / from Infinispan entries).
         */
        private SQLRecordTransformer<K,V> sqlRecordTransformer;


        /**
         * The optional SQL query executor.
         */
        private SQLQueryExecutor<K,V> sqlQueryExecutor;


        /**
         * The marshalled Infinispan entry factory.
         */
        private MarshalledEntryFactory<K, V> marshalledEntryFactory;


        /**
         * Purges expired entries found in the SQL store, as indicated by
         * their persisted metadata (optional, may be ignored / not stored).
         */
        private ExpiredEntryReaper<K,V> reaper;


        /**
         * SQL operation timers.
         */
        private SQLTimers timers;


        /**
         * jOOQ query fixes.
         */
        private JOOQFixes jooqFixes;


        /**
         * Loads an SQL record transformer instance of the specified class.
         *
         * @param clazz The class. Must not be {@code null}.
         *
         * @return The SQL record transformer.
         */
        @SuppressWarnings("unchecked")
        private SQLRecordTransformer<K,V> loadRecordTransformerClass(final Class<?> clazz) {

                try {
                        Class<SQLRecordTransformer<K,V>> genClazz = (Class<SQLRecordTransformer<K,V>>)clazz;
                        return genClazz.getDeclaredConstructor().newInstance();
                } catch (Exception e) {
                        throw new PersistenceException("Couldn't load SQL record transformer class: " + e.getMessage(), e);
                }
        }


        /**
         * Loads an SQL query executor instance of the specified class.
         *
         * @param clazz The class. Must not be {@code null}.
         *
         * @return The SQL query executor.
         */
        @SuppressWarnings("unchecked")
        private SQLQueryExecutor<K,V> loadQueryExecutorClass(final Class<?> clazz) {

                try {
                        Class<SQLQueryExecutor<K,V>> genClazz = (Class<SQLQueryExecutor<K,V>>)clazz;
                        return genClazz.getDeclaredConstructor().newInstance();
                } catch (Exception e) {
                        throw new PersistenceException("Couldn't load SQL query executor class: " + e.getMessage(), e);
                }
        }


        /**
         * Returns the SQL store configuration.
         *
         * @return The SQL store configuration, {@code null} if not
         *         initialised.
         */
        public SQLStoreConfiguration getConfiguration() {

                return config;
        }


        /**
         * Returns the underlying SQL data source.
         *
         * @return The underlying SQL data source, {@code null} if not
         *         initialised.
         */
        public HikariDataSource getDataSource() {

                return dataSource;
        }


        @Override
        @SuppressWarnings("unchecked")
        public void init(final InitializationContext ctx) {

                // This method will be invoked by the PersistenceManager during initialization. The InitializationContext
                // contains:
                // - this CacheLoader's configuration
                // - the cache to which this loader is applied. Your loader might want to use the cache's name to construct
                //   cache-specific identifiers
                // - the StreamingMarshaller that needs to be used to marshall/unmarshall the entries
                // - a TimeService which the loader can use to determine expired entries
                // - a ByteBufferFactory which needs to be used to construct ByteBuffers
                // - a MarshalledEntryFactory which needs to be used to construct entries from the data retrieved by the loader

                super.init(ctx);

                this.config = ctx.getConfiguration();

                Loggers.MAIN_LOG.info("[IS0100] SQL store: Infinispan cache store configuration for {}:", getCacheName());
                config.log();

                Loggers.MAIN_LOG.info("[IS0140] SQL store: Expiration thread wake up interval for cache {}: {}", getCacheName(),
                        ctx.getCache().getCacheConfiguration().expiration().wakeUpInterval());

                // Load and initialise the SQL record transformer
                Loggers.MAIN_LOG.debug("[IS0101] Loading SQL record transformer class {} for cache {}...",
                        config.getRecordTransformerClass(),
                        getCacheName());

                sqlRecordTransformer = loadRecordTransformerClass(config.getRecordTransformerClass());
                sqlRecordTransformer.init(() -> config.getSQLDialect());

                jooqFixes = new JOOQFixes(config.getSQLDialect(), sqlRecordTransformer.getCreateTableStatement());

                // Load and initialise the optional SQL query executor
                if (config.getQueryExecutorClass() != null) {
                        Loggers.MAIN_LOG.debug("[IS0201] Loading optional SQL query executor class {} for cache {}...",
                                config.getQueryExecutorClass(),
                                getCacheName());

                        sqlQueryExecutor = loadQueryExecutorClass(config.getQueryExecutorClass());

                        sqlQueryExecutor.init(new SQLQueryExecutorInitContext<K, V>() {
                                @Override
                                public DataSource getDataSource() {
                                        return dataSource;
                                }


                                @Override
                                public SQLRecordTransformer<K, V> getSQLRecordTransformer() {
                                        return sqlRecordTransformer;
                                }


                                @Override
                                public SQLDialect getSQLDialect() {
                                        return config.getSQLDialect();
                                }
                        });
                }

                marshalledEntryFactory = (MarshalledEntryFactory<K, V>)ctx.getMarshalledEntryFactory();

                timers = new SQLTimers(ctx.getCache().getName() + ".");

                Loggers.MAIN_LOG.info("[IS0102] Initialized SQL external store for cache {} with table {}",
                        getCacheName(),
                        sqlRecordTransformer.getTableName());
        }


        /**
         * Returns the underlying SQL record transformer.
         *
         * @return The SQL record transformer, {@code null} if not initialised.
         */
        public SQLRecordTransformer<K, V> getSQLRecordTransformer() {
                return sqlRecordTransformer;
        }


        @Override
        public QueryExecutor<K, V> getQueryExecutor() {

                return sqlQueryExecutor;
        }


        /**
         * Starts the Hikari data source using the existing configuration.
         *
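         * <p>The store properties are expected to carry standard HikariCP
         * configuration keys. An illustrative sketch (the values below are
         * hypothetical, not defaults of this store):
         *
         * <pre>{@code
         * jdbcUrl=jdbc:h2:mem:example
         * username=sa
         * password=secret
         * maximumPoolSize=10
         * }</pre>
         *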
         * @return The data source.
         */
        private HikariDataSource startDataSource() {

                Properties hikariProps = HikariConfigUtils.removeNonHikariProperties(config.properties());
                HikariPoolName poolName = HikariPoolName.setDefaultPoolName(hikariProps, getCacheName());

                HikariConfig hikariConfig = new HikariConfig(hikariProps);

                MetricRegistry metricRegistry = MonitorRegistries.getMetricRegistry();
                if (HikariConfigUtils.metricsAlreadyRegistered(poolName, metricRegistry)) {
                        Loggers.MAIN_LOG.warn("[IS0130] SQL store: Couldn't register Dropwizard metrics: Existing registered metrics for " + getCacheName());
                } else {
                        hikariConfig.setMetricRegistry(metricRegistry);
                }

                HealthCheckRegistry healthCheckRegistry = MonitorRegistries.getHealthCheckRegistry();
                if (HikariConfigUtils.healthChecksAlreadyRegistered(poolName, healthCheckRegistry)) {
                        Loggers.MAIN_LOG.warn("[IS0131] SQL store: Couldn't register Dropwizard health checks: Existing registered health checks for " + getCacheName());
                } else {
                        hikariConfig.setHealthCheckRegistry(healthCheckRegistry);
                }

                return new HikariDataSource(hikariConfig);
        }


        @Override
        public void start() {

                // This method will be invoked by the PersistenceManager to start the CacheLoader. At this stage configuration
                // is complete and the loader can perform operations such as opening a connection to the external storage,
                // initialize internal data structures, etc.

                if (config.getConnectionPool() == null) {
                        // Using own data source
                        dataSource = startDataSource();
                        SHARED_DATA_SOURCES.put(getCacheName(), dataSource);
                } else {
                        // Using shared data source
                        dataSource = SHARED_DATA_SOURCES.get(config.getConnectionPool());
                        if (dataSource == null) {
                                // Defer start when connection pool becomes available
                                SHARED_DATA_SOURCES.deferStart(config.getConnectionPool(), this);
                                return;
                        }
                }

                // Init jOOQ SQL context
                Settings jooqSettings = new Settings();
                if (config.getSQLDialect().equals(SQLDialect.H2)) {
                        // Quoted column names occasionally cause problems in H2
                        jooqSettings.setRenderNameStyle(RenderNameStyle.AS_IS);
                }
                sql = DSL.using(dataSource, config.getSQLDialect(), jooqSettings);

                if (config.createTableIfMissing()) {
                        try {
                                int rows = sql.execute(sqlRecordTransformer.getCreateTableStatement());

                                if (rows > 0) {
                                        Loggers.MAIN_LOG.info("[IS0129] SQL store: Created table {} for cache {}", sqlRecordTransformer.getTableName(), getCacheName());
                                }

                        } catch (Exception e) {
                                Loggers.MAIN_LOG.fatal("[IS0103] SQL store: Create table if not exists failed: {}", e.getMessage(), e);
                                throw new PersistenceException(e.getMessage(), e);
                        }

                        // Alter table?
                        if (sqlRecordTransformer instanceof SQLTableTransformer) {
                                Loggers.MAIN_LOG.warn("[IS0133] SQL store: Found table transformer");
                                List<String> transformQueries = ((SQLTableTransformer)sqlRecordTransformer)
                                        .getTransformTableStatements(
                                                SQLTableUtils.getColumnNames(table(sqlRecordTransformer.getTableName()), sql)
                                        );
                                if (transformQueries != null) {
                                        for (String query: transformQueries) {
                                                Loggers.MAIN_LOG.info("[IS0134] SQL store: About to execute table transform query: {}", query);
                                                sql.execute(query);
                                        }
                                }
                        }

                } else {
                        Loggers.MAIN_LOG.info("[IS0132] SQL store: Skipped create table if missing step");
                }

                Loggers.MAIN_LOG.info("[IS0104] Started SQL external store connector for cache {} with table {}", getCacheName(), sqlRecordTransformer.getTableName());

                reaper = new ExpiredEntryReaper<>(marshalledEntryFactory, sql, sqlRecordTransformer);
        }


        @Override
        public void stop() {

                super.stop();

                SHARED_DATA_SOURCES.remove(getCacheName());

                if (dataSource != null && config.getConnectionPool() == null) {
                        dataSource.close();
                }

                Loggers.MAIN_LOG.info("[IS0105] Stopped SQL store connector for cache {}", getCacheName());
        }


        @SuppressWarnings("unchecked")
        private K resolveKey(final Object key) {

                if (key instanceof byte[]) {
                        throw new PersistenceException("Cannot resolve " + getCacheName() + " cache key from byte[], enable compatibility mode");
                }

                return (K)key;
        }


        @Override
        public boolean contains(final Object key) {

                // This method will be invoked by the PersistenceManager to determine if the loader contains the specified key.
                // The implementation should be as fast as possible, e.g. it should strive to transfer the least amount of data possible
                // from the external storage to perform the check. Also, if possible, make sure the field is indexed on the external storage
                // so that its existence can be determined as quickly as possible.
                //
                // Note that keys will be in the cache's native format, which means that if the cache is being used by a remoting protocol
                // such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].

                Loggers.SQL_LOG.trace("[IS0106] SQL store: Checking {} cache key {}", getCacheName(), key);

                Timer.Context timerCtx = timers.loadTimer.time();

                try {
                        return sql.selectOne()
                                .from(table(sqlRecordTransformer.getTableName()))
                                .where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
                                .fetchOne() != null;

                } catch (Exception e) {
                        Loggers.SQL_LOG.error("[IS0107] {}: {}", e.getMessage(), e);
                        throw new PersistenceException(e.getMessage(), e);
                } finally {
                        timerCtx.stop();
                }
        }


        @Override
        public MarshalledEntry<K,V> load(final Object key) {

                // Fetches an entry from the storage using the specified key. The CacheLoader should retrieve from the external storage all
                // of the data that is needed to reconstruct the entry in memory, i.e. the value and optionally the metadata. This method
                // needs to return a MarshalledEntry which can be constructed as follows:
                //
                // ctx.getMarshalledEntryFactory().newMarshalledEntry(key, value, metadata);
                //
                // If the entry does not exist or has expired, this method should return null.
                // If an error occurs while retrieving data from the external storage, this method should throw a PersistenceException.
                //
                // Note that keys and values will be in the cache's native format, which means that if the cache is being used by a remoting protocol
                // such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].
                // If the loader needs to have knowledge of the key/value data beyond their binary representation, then it needs access to the key's and value's
                // classes and the marshaller used to encode them.

                Loggers.SQL_LOG.trace("[IS0108] SQL store: Loading {} cache entry with key {}", getCacheName(), key);

                final Record record;

                Timer.Context timerCtx = timers.loadTimer.time();

                try {
                        record = sql.selectFrom(table(sqlRecordTransformer.getTableName()))
                                .where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
                                .fetchOne();

                } catch (Exception e) {
                        Loggers.SQL_LOG.error("[IS0109] {}: {}", e.getMessage(), e);
                        throw new PersistenceException(e.getMessage(), e);
                } finally {
                        timerCtx.stop();
                }

                if (record == null) {
                        // Not found
                        Loggers.SQL_LOG.trace("[IS0110] SQL store: Record with key {} not found", key);
                        return null;
                }

                if (Loggers.SQL_LOG.isTraceEnabled()) {
                        Loggers.SQL_LOG.trace("[IS0111] SQL store: Retrieved record: {}", record);
                }

                // Transform SQL entry to Infinispan entry
                InfinispanEntry<K,V> infinispanEntry;
                try {
                        infinispanEntry = sqlRecordTransformer.toInfinispanEntry(record);
                } catch (Exception e) {
                        Loggers.SQL_LOG.error("[IS0137] SQL store: Error transforming SQL record for key " + key + ": " + e.getMessage());
                        throw e;
                }

                if (infinispanEntry.isExpired()) {
                        Loggers.SQL_LOG.trace("[IS0135] SQL store: Record with key {} expired", key);
                        return null;
                }

                return marshalledEntryFactory.newMarshalledEntry(
                        infinispanEntry.getKey(),
                        infinispanEntry.getValue(),
                        infinispanEntry.getMetadata());
        }


        @Override
        public boolean delete(final Object key) {

                // The CacheWriter should remove from the external storage the entry identified by the specified key.
                // Note that keys will be in the cache's native format, which means that if the cache is being used by a remoting protocol
                // such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].

                Loggers.SQL_LOG.trace("[IS0112] SQL store: Deleting {} cache entry with key {}", getCacheName(), key);

                int deletedRows;

                Timer.Context timerCtx = timers.deleteTimer.time();

                try {
                        deletedRows = sql.deleteFrom(table(sqlRecordTransformer.getTableName()))
                                .where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
                                .execute();

                } catch (Exception e) {
                        Loggers.SQL_LOG.error("[IS0113] {}: {}", e.getMessage(), e);
                        throw new PersistenceException(e.getMessage(), e);
                } finally {
                        timerCtx.stop();
                }

                Loggers.SQL_LOG.trace("[IS0113] SQL store: Deleted {} record(s) with key {}", deletedRows, key);

                if (deletedRows == 1) {
                        return true;
                } else if (deletedRows == 0) {
                        return false;
                } else {
                        Loggers.SQL_LOG.error("[IS0114] Too many deleted rows ({}) for key {}", deletedRows, key);
                        throw new PersistenceException("Too many deleted rows for key " + key);
                }
        }


        @Override
        public void write(final MarshalledEntry<? extends K, ? extends V> marshalledEntry) {

                // The CacheWriter should write the specified entry to the external storage.
                //
                // The PersistenceManager uses MarshalledEntry as the default format so that CacheWriters can efficiently store data coming
                // from a remote node, thus avoiding any additional transformation steps.
                //
                // Note that keys and values will be in the cache's native format, which means that if the cache is being used by a remoting protocol
                // such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].

                Loggers.SQL_LOG.trace("[IS0115] SQL store: Writing {} cache entry {}", getCacheName(), marshalledEntry);

                Timer.Context timerCtx = timers.writeTimer.time();

                try {
                        SQLRecord sqlRecord = sqlRecordTransformer.toSQLRecord(
                                new InfinispanEntry<>(
                                        marshalledEntry.getKey(),
                                        marshalledEntry.getValue(),
                                        marshalledEntry.getMetadata()));

                        // Use H2 style MERGE, jOOQ will adapt it for the particular database
                        // http://www.jooq.org/doc/3.8/manual/sql-building/sql-statements/merge-statement/
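                        //
                        // Illustrative example only: for a hypothetical table "users" keyed on
                        // column "id", the rendered statement would look roughly like
                        //
                        //   MERGE INTO users (id, name) KEY (id) VALUES (?, ?)
                        //
                        // The actual table, columns and dialect-specific form depend on the
                        // configured SQL record transformer and the jOOQ dialect in use.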
                        Merge mergeStatement = sql.mergeInto(table(sqlRecordTransformer.getTableName()), sqlRecord.getFields().keySet())
                                .key(sqlRecord.getKeyColumns())
                                .values(sqlRecord.getFields().values());

                        String sqlStatement = jooqFixes.fixMergeStatement(mergeStatement);

                        int rows = sql.execute(sqlStatement);

                        if (rows != 1) {

                                if (SQLDialect.MYSQL.equals(config.getSQLDialect()) && rows == 2) {
                                        // MySQL reports an update of an existing row (INSERT ... ON DUPLICATE KEY UPDATE) as 2 affected rows
                                        return;
                                }

                                Loggers.SQL_LOG.error("[IS0116] SQL insert / update for key {} in table {} failed: Rows {}",
                                        marshalledEntry.getKey(), sqlRecordTransformer.getTableName(), rows);
                                throw new PersistenceException("(Synthetic) SQL MERGE failed: Rows " + rows);
                        }

                } catch (Exception e) {
                        Loggers.SQL_LOG.error("[IS0117] {}: {}", e.getMessage(), e);
                        throw new PersistenceException(e.getMessage(), e);
                } finally {
                        timerCtx.stop();
                }
        }


        @Override
        public Publisher<MarshalledEntry<K, V>> publishEntries(Predicate<? super K> filter, boolean fetchValue, boolean fetchMetadata) {

                Loggers.SQL_LOG.trace("[IS0118] SQL store: Processing key filter for {} cache: fetchValue={} fetchMetadata={}",
                        getCacheName(), fetchValue, fetchMetadata);

                final Instant now = Instant.now();

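                // Streams the entire table through the SQL record transformer, applying the
                // optional key filter and skipping entries that have expired relative to the
                // snapshot taken above; the process timer context is opened per subscription
                // and closed when the stream terminates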
                return Flowable.using(timers.processTimer::time,
                        ignore -> Flowable.fromIterable(sql.selectFrom(table(sqlRecordTransformer.getTableName())).fetch())
                                .map(sqlRecordTransformer::toInfinispanEntry)
                                .filter(infinispanEntry -> filter == null || filter.test(infinispanEntry.getKey()))
                                .filter(infinispanEntry -> ! infinispanEntry.isExpired(now))
                                .map(infinispanEntry -> marshalledEntryFactory.newMarshalledEntry(
                                        infinispanEntry.getKey(),
                                        infinispanEntry.getValue(),
                                        infinispanEntry.getMetadata()))
                                .doOnError(e -> Loggers.SQL_LOG.error("[IS0119] {}: {}", e.getMessage(), e)),
                        Timer.Context::stop);
        }


        @Override
        public int size() {

                // Infinispan code analysis on 8.2 shows that this method is never called in practice, and
                // is not wired to the data / cache container API

                Loggers.SQL_LOG.trace("[IS0120] SQL store: Counting {} records", getCacheName());

                final int count;

                try {
                        count = sql.fetchCount(table(sqlRecordTransformer.getTableName()));

                } catch (Exception e) {
                        Loggers.SQL_LOG.error("[IS0121] {}: {}", e.getMessage(), e);
                        throw new PersistenceException(e.getMessage(), e);
                }

                Loggers.SQL_LOG.trace("[IS0122] SQL store: Counted {} {} records", count, getCacheName());

                return count;
        }


        @Override
        public void clear() {

                Loggers.SQL_LOG.trace("[IS0123] SQL store: Clearing {} records", getCacheName());

                int numDeleted;

                try {
                        numDeleted = sql.deleteFrom(table(sqlRecordTransformer.getTableName())).execute();

                } catch (Exception e) {
                        Loggers.SQL_LOG.error("[IS0124] {}: {}", e.getMessage(), e);
                        throw new PersistenceException(e.getMessage(), e);
                }

                Loggers.SQL_LOG.info("[IS0125] SQL store: Cleared {} records from table {}", numDeleted, sqlRecordTransformer.getTableName());
        }


        @Override
        public void purge(final Executor executor, final PurgeListener<? super K> purgeListener) {

                // Should never be called in the presence of purge(Executor,ExpirationPurgeListener)

                Loggers.SQL_LOG.trace("[IS0126] SQL store: Purging {} cache entries", getCacheName());

                Timer.Context timerCtx = timers.purgeTimer.time();

                try {
                        executor.execute(() -> reaper.purge(purgeListener));

                } catch (Exception e) {
                        Loggers.SQL_LOG.error("[IS0127] {}: {}", e.getMessage(), e);
                        throw new PersistenceException("Purge exception: " + e.getMessage(), e);
                } finally {
                        timerCtx.stop();
                }
        }


        @Override
        public void purge(final Executor executor, final ExpirationPurgeListener<K,V> purgeListener) {

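                // Extended purge variant: the reaper removes expired entries and reports
                // them to the expiration purge listener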
                Loggers.SQL_LOG.trace("[IS0150] SQL store: Purging {} cache entries", getCacheName());

                Timer.Context timerCtx = timers.purgeTimer.time();

                try {
                        executor.execute(() -> reaper.purgeExtended(purgeListener));

                } catch (Exception e) {
                        Loggers.SQL_LOG.error("[IS0151] {}: {}", e.getMessage(), e);
                        throw new PersistenceException("Purge exception: " + e.getMessage(), e);
                } finally {
                        timerCtx.stop();
                }
        }
}