package com.nimbusds.infinispan.persistence.sql;


import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.codahale.metrics.health.HealthCheckRegistry;
import com.nimbusds.common.monitor.MonitorRegistries;
import com.nimbusds.infinispan.persistence.common.InfinispanEntry;
import com.nimbusds.infinispan.persistence.common.InfinispanStore;
import com.nimbusds.infinispan.persistence.common.InternalMetadataBuilder;
import com.nimbusds.infinispan.persistence.common.query.QueryExecutor;
import com.nimbusds.infinispan.persistence.sql.config.SQLStoreConfiguration;
import com.nimbusds.infinispan.persistence.sql.query.SQLQueryExecutor;
import com.nimbusds.infinispan.persistence.sql.query.SQLQueryExecutorInitContext;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import io.reactivex.rxjava3.core.Flowable;
import net.jcip.annotations.ThreadSafe;
import org.infinispan.commons.configuration.ConfiguredBy;
import org.infinispan.commons.persistence.Store;
import org.infinispan.metadata.impl.PrivateMetadata;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.MarshallableEntryFactory;
import org.infinispan.persistence.spi.PersistenceException;
import org.jooq.DSLContext;
import org.jooq.Query;
import org.jooq.SQLDialect;
import org.jooq.conf.RenderNameStyle;
import org.jooq.conf.Settings;
import org.jooq.impl.DSL;
import org.kohsuke.MetaInfServices;
import org.reactivestreams.Publisher;

import javax.sql.DataSource;
import java.time.Instant;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;

import static org.jooq.impl.DSL.table;


/**
 * SQL store for Infinispan caches and maps.
 */
@ThreadSafe
@MetaInfServices
@ConfiguredBy(SQLStoreConfiguration.class)
@Store(shared = true)
public class SQLStore<K,V> extends InfinispanStore<K,V> {


    /**
     * The supported databases.
     */
    public static final List<SQLDialect> SUPPORTED_DATABASES = List.of(
        SQLDialect.H2, SQLDialect.MYSQL, SQLDialect.POSTGRES_9_5, SQLDialect.SQLSERVER2016, SQLDialect.ORACLE18C
    );


    /**
     * The SQL store configuration.
     */
    private SQLStoreConfiguration config;


    /**
     * Enables sharing of the Hikari SQL data sources.
     */
    private static final DataSources SHARED_DATA_SOURCES = new DataSources();


    /**
     * The Hikari SQL data source (with connection pool).
     */
    private HikariDataSource dataSource;


    /**
     * Wraps the SQL data source with jOOQ.
     * See http://stackoverflow.com/a/31389342/429425
     */
    private DSLContext sql;


    /**
     * The SQL record transformer (to / from Infinispan entries).
     */
    private SQLRecordTransformer<K,V> sqlRecordTransformer;


    /**
     * The optional SQL query executor.
     */
    private SQLQueryExecutor<K,V> sqlQueryExecutor;


    /**
     * The marshallable Infinispan entry factory.
     */
    private MarshallableEntryFactory<K,V> marshallableEntryFactory;
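

    // Note on expiration semantics (standard Infinispan behaviour, stated
    // here for orientation): an entry counts as expired when its persisted
    // metadata says so, roughly
    //
    //   created + lifespan <= now   (when lifespan >= 0), or
    //   lastUsed + maxIdle <= now   (when maxIdle >= 0)
    //
    // A record transformer may choose not to persist this metadata, in
    // which case entries never expire from the store's point of view.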


    /**
     * Purges expired entries found in the SQL store, as indicated by
     * their persisted metadata (optional, may be ignored / not stored).
     */
    private ExpiredEntryReaper<K,V> reaper;


    /**
     * SQL operation timers.
     */
    private SQLTimers timers;


    /**
     * jOOQ query fixes.
     */
    private JOOQFixes jooqFixes;


    /**
     * Instantiates an SQL record transformer of the specified class.
     *
     * @param clazz The class. Must not be {@code null}.
     *
     * @return The SQL record transformer.
     */
    @SuppressWarnings("unchecked")
    private SQLRecordTransformer<K,V> loadRecordTransformerClass(final Class<?> clazz) {

        try {
            Class<SQLRecordTransformer<K,V>> genClazz = (Class<SQLRecordTransformer<K,V>>)clazz;
            return genClazz.getDeclaredConstructor().newInstance();
        } catch (Exception e) {
            throw new PersistenceException("Couldn't load SQL record transformer class: " + e.getMessage(), e);
        }
    }


    /**
     * Instantiates an SQL query executor of the specified class.
     *
     * @param clazz The class. Must not be {@code null}.
     *
     * @return The SQL query executor.
     */
    @SuppressWarnings("unchecked")
    private SQLQueryExecutor<K,V> loadQueryExecutorClass(final Class<?> clazz) {

        try {
            Class<SQLQueryExecutor<K,V>> genClazz = (Class<SQLQueryExecutor<K,V>>)clazz;
            return genClazz.getDeclaredConstructor().newInstance();
        } catch (Exception e) {
            throw new PersistenceException("Couldn't load SQL query executor class: " + e.getMessage(), e);
        }
    }


    /**
     * Returns the SQL store configuration.
     *
     * @return The SQL store configuration, {@code null} if not
     *         initialised.
     */
    public SQLStoreConfiguration getConfiguration() {

        return config;
    }


    /**
     * Returns the underlying SQL data source.
     *
     * @return The underlying SQL data source, {@code null} if not
     *         initialised.
     */
    public HikariDataSource getDataSource() {

        return dataSource;
    }
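

    // For orientation, a minimal sketch of the shape a pluggable record
    // transformer is expected to have. The method set is inferred from the
    // call sites in this class; the actual SQLRecordTransformer interface
    // is authoritative, and the table name below is hypothetical:
    //
    //   public class UserRecordTransformer implements SQLRecordTransformer<String,User> {
    //
    //       // A public no-arg constructor is required -- the class is
    //       // instantiated reflectively in loadRecordTransformerClass
    //
    //       @Override
    //       public String getTableName() { return "users"; }
    //
    //       // plus getCreateTableStatement, resolveSelectionConditions,
    //       // toSQLRecord, toInfinispanEntry, init, ...
    //   }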


    @Override
    public void init(final InitializationContext ctx) {

        // This method will be invoked by the PersistenceManager during initialization. The InitializationContext
        // contains:
        // - this CacheLoader's configuration
        // - the cache to which this loader is applied. Your loader might want to use the cache's name to
        //   construct cache-specific identifiers
        // - the StreamingMarshaller that needs to be used to marshall / unmarshall the entries
        // - a TimeService which the loader can use to determine expired entries
        // - a ByteBufferFactory which needs to be used to construct ByteBuffers
        // - a MarshalledEntryFactory which needs to be used to construct entries from the data retrieved by the loader

        super.init(ctx);

        this.config = ctx.getConfiguration();

        Loggers.MAIN_LOG.info("[IS0100] SQL store: Infinispan cache store configuration for {}:", getCacheName());
        config.log();

        Loggers.MAIN_LOG.info("[IS0140] SQL store: Expiration thread wake up interval for cache {}: {}", getCacheName(),
            ctx.getCache().getCacheConfiguration().expiration().wakeUpInterval());

        // Load and initialise the SQL record transformer
        Loggers.MAIN_LOG.debug("[IS0101] Loading SQL record transformer class {} for cache {}...",
            config.getRecordTransformerClass(),
            getCacheName());

        sqlRecordTransformer = loadRecordTransformerClass(config.getRecordTransformerClass());
        sqlRecordTransformer.init(() -> config.getSQLDialect());

        jooqFixes = new JOOQFixes(config.getSQLDialect());

        // Load and initialise the optional SQL query executor
        if (config.getQueryExecutorClass() != null) {
            Loggers.MAIN_LOG.debug("[IS0201] Loading optional SQL query executor class {} for cache {}...",
                config.getQueryExecutorClass(),
                getCacheName());

            sqlQueryExecutor = loadQueryExecutorClass(config.getQueryExecutorClass());

            sqlQueryExecutor.init(new SQLQueryExecutorInitContext<>() {
                @Override
                public DataSource getDataSource() {
                    return dataSource;
                }


                @Override
                public SQLRecordTransformer<K,V> getSQLRecordTransformer() {
                    return sqlRecordTransformer;
                }


                @Override
                public SQLDialect getSQLDialect() {
                    return config.getSQLDialect();
                }
            });
        }

        marshallableEntryFactory = ctx.getMarshallableEntryFactory();

        timers = new SQLTimers(ctx.getCache().getName() + ".");

        Loggers.MAIN_LOG.info("[IS0102] Initialized SQL external store for cache {} with table {}",
            getCacheName(),
            sqlRecordTransformer.getTableName());
    }


    private RetrievedSQLRecord wrap(final org.jooq.Record record) {
        // Prevent retrieval exceptions caused by Oracle's internal
        // conversion of all table column names to upper case. Applies
        // to any Oracle version.
        final boolean fieldsToUpperCase = SQLDialect.ORACLE.family().equals(config.getSQLDialect().family());
        return new RetrievedSQLRecordImpl(record, fieldsToUpperCase);
    }


    /**
     * Returns the underlying SQL record transformer.
     *
     * @return The SQL record transformer, {@code null} if not initialised.
     */
    public SQLRecordTransformer<K,V> getSQLRecordTransformer() {
        return sqlRecordTransformer;
    }


    @Override
    public QueryExecutor<K,V> getQueryExecutor() {
        return sqlQueryExecutor;
    }
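

    // For reference, the kind of connection pool properties that
    // startDataSource below feeds into HikariConfig. The property names are
    // standard HikariCP settings; the values are purely illustrative:
    //
    //   jdbcUrl         = jdbc:postgresql://db.example.com:5432/ispn
    //   username        = ispn
    //   password        = secret
    //   maximumPoolSize = 10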


    /**
     * Starts the Hikari data source using the existing configuration.
     *
     * @return The data source.
     */
    private HikariDataSource startDataSource() {

        Properties hikariProps = HikariConfigUtils.removeNonHikariProperties(config.properties());
        hikariProps = HikariConfigUtils.removeBlankProperties(hikariProps);
        HikariPoolName poolName = HikariPoolName.setDefaultPoolName(hikariProps, getCacheName());

        var hikariConfig = new HikariConfig(hikariProps);

        MetricRegistry metricRegistry = MonitorRegistries.getMetricRegistry();
        if (HikariConfigUtils.metricsAlreadyRegistered(poolName, metricRegistry)) {
            Loggers.MAIN_LOG.warn("[IS0130] SQL store: Couldn't register Dropwizard metrics: Existing registered metrics for " + getCacheName());
        } else {
            hikariConfig.setMetricRegistry(metricRegistry);
        }

        HealthCheckRegistry healthCheckRegistry = MonitorRegistries.getHealthCheckRegistry();
        if (HikariConfigUtils.healthChecksAlreadyRegistered(poolName, healthCheckRegistry)) {
            Loggers.MAIN_LOG.warn("[IS0131] SQL store: Couldn't register Dropwizard health checks: Existing registered health checks for " + getCacheName());
        } else {
            hikariConfig.setHealthCheckRegistry(healthCheckRegistry);
        }

        return new HikariDataSource(hikariConfig);
    }
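

    // For illustration only: with createTableIfMissing enabled, start()
    // below executes the DDL supplied by the record transformer, which for
    // a hypothetical transformer might render as
    //
    //   CREATE TABLE IF NOT EXISTS users (
    //       id  VARCHAR(64) PRIMARY KEY,
    //       doc TEXT NOT NULL
    //   );
    //
    // The actual statement comes entirely from
    // SQLRecordTransformer.getCreateTableStatement().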


    @Override
    public void start() {

        // This method will be invoked by the PersistenceManager to start the CacheLoader. At this stage
        // configuration is complete and the loader can perform operations such as opening a connection to the
        // external storage, initializing internal data structures, etc.

        if (config.getConnectionPool() == null) {
            // Using own data source
            dataSource = startDataSource();
            SHARED_DATA_SOURCES.put(getCacheName(), dataSource);
        } else {
            // Using shared data source
            dataSource = SHARED_DATA_SOURCES.get(config.getConnectionPool());
            if (dataSource == null) {
                // Defer start until the connection pool becomes available
                SHARED_DATA_SOURCES.deferStart(config.getConnectionPool(), this);
                return;
            }
        }

        Loggers.MAIN_LOG.info("[IS0143] SQL store: Transaction isolation for cache {}: {}",
            getCacheName(), TXIsolation.inspect(dataSource));

        // Init jOOQ SQL context
        var jooqSettings = new Settings();
        if (SQLDialect.H2.equals(config.getSQLDialect())) {
            // Quoted column names occasionally cause problems in H2
            jooqSettings.setRenderNameStyle(RenderNameStyle.AS_IS);
        }
        sql = DSL.using(dataSource, config.getSQLDialect(), jooqSettings);

        if (config.createTableIfMissing()) {
            try {
                Loggers.MAIN_LOG.info("[IS0136] SQL store: Executing create table {} (if missing) for cache {}", sqlRecordTransformer.getTableName(), getCacheName());
                int rows = sql.execute(sqlRecordTransformer.getCreateTableStatement());
                if (rows > 0) {
                    Loggers.MAIN_LOG.info("[IS0129] SQL store: Created table {} for cache {}", sqlRecordTransformer.getTableName(), getCacheName());
                } else {
                    Loggers.MAIN_LOG.info("[IS0129] SQL store: Create table {} (if missing) for cache {} returned {} changed rows", sqlRecordTransformer.getTableName(), getCacheName(), rows);
                }

            } catch (Exception e) {
                String msg = "[IS0103] SQL store: Create table failed, {}: " + e.getMessage();
                if (config.createTableIgnoreErrors()) {
                    Loggers.MAIN_LOG.warn(msg, "continuing");
                } else {
                    Loggers.MAIN_LOG.fatal(msg, "aborting", e);
                    throw new PersistenceException(e.getMessage(), e);
                }
            }

            // Alter table?
            if (sqlRecordTransformer instanceof SQLTableTransformer) {
                Loggers.MAIN_LOG.info("[IS0133] SQL store: Found table transformer");
                List<String> transformQueries = ((SQLTableTransformer)sqlRecordTransformer)
                    .getTransformTableStatements(
                        SQLTableUtils.getColumnNames(table(sqlRecordTransformer.getTableName()), sql)
                    );
                if (transformQueries != null) {
                    for (String query: transformQueries) {
                        Loggers.MAIN_LOG.info("[IS0134] SQL store: Executing table transform for cache {}: {}", getCacheName(), query);
                        sql.execute(query);
                    }
                }
            }

        } else {
            Loggers.MAIN_LOG.info("[IS0132] SQL store: Skipped create table (if missing) step");
        }

        Loggers.MAIN_LOG.info("[IS0104] Started SQL external store connector for cache {} with table {}", getCacheName(), sqlRecordTransformer.getTableName());

        if (sqlRecordTransformer.getKeyColumnsForExpiredEntryReaper() != null) {
            reaper = new ExpiredEntryPagedReaper<>(
                marshallableEntryFactory,
                sql,
                sqlRecordTransformer,
                this::wrap,
                config.getExpiredQueryPageLimit(),
                timers.deleteTimer);
        } else {
            reaper = new ExpiredEntryReaper<>(
                marshallableEntryFactory,
                sql,
                sqlRecordTransformer,
                this::wrap,
                timers.deleteTimer);
        }
    }


    @Override
    public void stop() {

        super.stop();

        SHARED_DATA_SOURCES.remove(getCacheName());

        if (dataSource != null && config.getConnectionPool() == null) {
            dataSource.close();
        }

        Loggers.MAIN_LOG.info("[IS0105] Stopped SQL store connector for cache {}", getCacheName());
    }


    @SuppressWarnings("unchecked")
    private K resolveKey(final Object key) {

        if (key instanceof byte[]) {
            throw new PersistenceException("Cannot resolve " + getCacheName() + " cache key from byte[], enable compatibility mode");
        }

        return (K)key;
    }


    @Override
    public boolean contains(final Object key) {

        // This method will be invoked by the PersistenceManager to determine if the loader contains the specified
        // key. The implementation should be as fast as possible, e.g. it should strive to transfer the least
        // amount of data possible from the external storage to perform the check. Also, if possible, make sure
        // the field is indexed on the external storage so that its existence can be determined as quickly as
        // possible.
        //
        // Note that keys will be in the cache's native format, which means that if the cache is being used by a
        // remoting protocol such as HotRod or REST and compatibility mode has not been enabled, then they will be
        // encoded in a byte[].

        Loggers.SQL_LOG.trace("[IS0106] SQL store: Checking {} cache key {}", getCacheName(), key);

        try (Timer.Context timerCtx = timers.loadTimer.time()) {
            return sql.selectOne()
                .from(table(sqlRecordTransformer.getTableName()))
                .where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
                .fetchOne() != null;
        } catch (Exception e) {
            Loggers.SQL_LOG.error("[IS0107] {}: {}", e.getMessage(), e);
            throw new PersistenceException(e.getMessage(), e);
        }
    }
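

    // For orientation: with a hypothetical table "users" keyed on an "id"
    // column, the jOOQ calls in contains() above and loadEntry() below
    // render to roughly
    //
    //   SELECT 1 FROM users WHERE id = ?   -- contains
    //   SELECT * FROM users WHERE id = ?   -- loadEntry
    //
    // where the WHERE clause is produced by the transformer's
    // resolveSelectionConditions(key).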


    @Override
    public MarshallableEntry<K, V> loadEntry(final Object key) {

        // Outdated?
        // Fetches an entry from the storage using the specified key. The CacheLoader should retrieve from the
        // external storage all data that is needed to reconstruct the entry in memory, i.e. the value and
        // optionally the metadata. This method needs to return a MarshalledEntry which can be constructed as
        // follows:
        //
        // ctx.getMarshalledEntryFactory().new MarshalledEntry(key, value, metadata);
        //
        // If the entry does not exist or has expired, this method should return null.
        // If an error occurs while retrieving data from the external storage, this method should throw a
        // PersistenceException.
        //
        // Note that keys and values will be in the cache's native format, which means that if the cache is being
        // used by a remoting protocol such as HotRod or REST and compatibility mode has not been enabled, then
        // they will be encoded in a byte[]. If the loader needs to have knowledge of the key / value data beyond
        // their binary representation, then it needs access to the key's and value's classes and the marshaller
        // used to encode them.

        Loggers.SQL_LOG.trace("[IS0108] SQL store: Loading {} cache entry with key {}", getCacheName(), key);

        final org.jooq.Record record;

        try (Timer.Context timerCtx = timers.loadTimer.time()) {
            record = sql.selectFrom(table(sqlRecordTransformer.getTableName()))
                .where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
                .fetchOne();
        } catch (Exception e) {
            Loggers.SQL_LOG.error("[IS0109] {}: {}", e.getMessage(), e);
            throw new PersistenceException(e.getMessage(), e);
        }

        if (record == null) {
            // Not found
            Loggers.SQL_LOG.trace("[IS0110] SQL store: Record with key {} not found", key);
            return null;
        }

        if (Loggers.SQL_LOG.isTraceEnabled()) {
            Loggers.SQL_LOG.trace("[IS0111] SQL store: Retrieved record: {}", record);
        }

        // Transform SQL record to Infinispan entry
        InfinispanEntry<K,V> infinispanEntry;
        try {
            infinispanEntry = sqlRecordTransformer.toInfinispanEntry(wrap(record));
        } catch (Exception e) {
            Loggers.SQL_LOG.error("[IS0137] SQL store: Error transforming SQL record for key " + key + ": " + e.getMessage());
            throw e;
        }

        if (infinispanEntry.isExpired()) {
            Loggers.SQL_LOG.trace("[IS0135] SQL store: Record with key {} expired", key);
            return null;
        }

        return marshallableEntryFactory.create(
            infinispanEntry.getKey(),
            infinispanEntry.getValue(),
            infinispanEntry.getMetadata(),
            PrivateMetadata.empty(),
            infinispanEntry.created(),
            infinispanEntry.lastUsed()
        );
    }
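

    // For orientation: delete() below renders to roughly
    //
    //   DELETE FROM users WHERE id = ?   -- hypothetical table / column
    //
    // and maps the affected row count onto the boolean SPI contract:
    // 1 row deleted -> true, 0 rows -> false, anything else is an error.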


    @Override
    public boolean delete(final Object key) {

        // The CacheWriter should remove from the external storage the entry identified by the specified key.
        // Note that keys will be in the cache's native format, which means that if the cache is being used by a
        // remoting protocol such as HotRod or REST and compatibility mode has not been enabled, then they will be
        // encoded in a byte[].

        Loggers.SQL_LOG.trace("[IS0112] SQL store: Deleting {} cache entry with key {}", getCacheName(), key);

        int deletedRows;

        try (Timer.Context timerCtx = timers.deleteTimer.time()) {
            deletedRows = sql.deleteFrom(table(sqlRecordTransformer.getTableName()))
                .where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
                .execute();
        } catch (Exception e) {
            Loggers.SQL_LOG.error("[IS0113] {}: {}", e.getMessage(), e);
            throw new PersistenceException(e.getMessage(), e);
        }

        Loggers.SQL_LOG.trace("[IS0113] SQL store: Deleted {} record with key {}", deletedRows, key);

        if (deletedRows == 1) {
            return true;
        } else if (deletedRows == 0) {
            return false;
        } else {
            Loggers.SQL_LOG.error("[IS0114] Too many deleted rows ({}) for key {}", deletedRows, key);
            throw new PersistenceException("Too many deleted rows for key " + key);
        }
    }


    @Override
    public void write(final MarshallableEntry<? extends K, ? extends V> entry) {

        Loggers.SQL_LOG.trace("[IS0115] SQL store: Writing {} cache entry {}", getCacheName(), entry);

        try (Timer.Context timerCtx = timers.writeTimer.time()) {
            SQLRecord sqlRecord = sqlRecordTransformer.toSQLRecord(
                new InfinispanEntry<>(
                    entry.getKey(),
                    entry.getValue(),
                    new InternalMetadataBuilder()
                        .created(entry.created())
                        .lastUsed(entry.lastUsed())
                        .lifespan(entry.getMetadata() != null ? entry.getMetadata().lifespan() : -1L)
                        .maxIdle(entry.getMetadata() != null ? entry.getMetadata().maxIdle() : -1L)
                        .build()));

            // Oracle (N)CLOB chunking
            // https://stackoverflow.com/a/63957679/429425
            final AtomicReference<Map<String, List<String>>> oracleClobChunksMap = new AtomicReference<>();
            sqlRecord = jooqFixes.prepareOracleWriteCLOB(sqlRecord, oracleClobChunksMap);

            Query query = SQLQueryUtils.createUpsert(table(sqlRecordTransformer.getTableName()), sqlRecord, config.getSQLDialect(), sql);

            int rows;
            if (oracleClobChunksMap.get() != null && ! oracleClobChunksMap.get().isEmpty()) {
                rows = sql.execute(jooqFixes.completeOracleWriteClob(query.toString(), oracleClobChunksMap.get()));
            } else {
                rows = query.execute();
            }

            if (rows != 1) {

                if (SQLDialect.MYSQL.equals(config.getSQLDialect()) && rows == 2) {
                    // MySQL indicates an UPDATE on INSERT by reporting 2 affected rows
                    return;
                }

                Loggers.SQL_LOG.error("[IS0116] SQL insert / update for key {} in table {} failed: Rows {}",
                    entry.getKey(), sqlRecordTransformer.getTableName(), rows);
                throw new PersistenceException("(Synthetic) SQL MERGE failed: Rows " + rows);
            }

        } catch (Exception e) {
            Loggers.SQL_LOG.error("[IS0117] {}: {}", e.getMessage(), e);
            throw new PersistenceException(e.getMessage(), e);
        }
    }
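

    // For illustration, the kind of dialect-specific upsert that
    // SQLQueryUtils.createUpsert is expected to produce (hypothetical
    // table / columns; the exact SQL may differ):
    //
    //   MySQL:      INSERT INTO users (id, doc) VALUES (?, ?)
    //               ON DUPLICATE KEY UPDATE doc = VALUES(doc)
    //   PostgreSQL: INSERT INTO users (id, doc) VALUES (?, ?)
    //               ON CONFLICT (id) DO UPDATE SET doc = EXCLUDED.doc
    //   Others:     a synthetic MERGE statement
    //
    // This also explains the MySQL affected-rows convention checked in
    // write() above: 1 for a plain insert, 2 when the insert turned into
    // an update of an existing row.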


    @Override
    public Publisher<MarshallableEntry<K, V>> entryPublisher(final Predicate<? super K> filter, final boolean fetchValue, final boolean fetchMetadata) {

        Loggers.SQL_LOG.trace("[IS0118] SQL store: Processing key filter for {} cache: fetchValue={} fetchMetadata={}",
            getCacheName(), fetchValue, fetchMetadata);

        final Instant now = Instant.now();

        return Flowable.using(timers.processTimer::time,
            ignore -> Flowable.fromIterable(sql.selectFrom(table(sqlRecordTransformer.getTableName())).fetch())
                .map(record -> sqlRecordTransformer.toInfinispanEntry(wrap(record)))
                .filter(infinispanEntry -> filter == null || filter.test(infinispanEntry.getKey()))
                .filter(infinispanEntry -> ! infinispanEntry.isExpired(now))
                .map(infinispanEntry -> marshallableEntryFactory.create(
                    infinispanEntry.getKey(),
                    infinispanEntry.getValue(),
                    infinispanEntry.getMetadata(),
                    PrivateMetadata.empty(),
                    infinispanEntry.created(),
                    infinispanEntry.lastUsed()
                ))
                .doOnError(e -> Loggers.SQL_LOG.error("[IS0119] {}: {}", e.getMessage(), e)),
            Timer.Context::stop);
    }


    @Override
    public int size() {

        // Infinispan code analysis on 8.2 shows that this method is never called in practice, and
        // is not wired to the data / cache container API

        Loggers.SQL_LOG.trace("[IS0120] SQL store: Counting {} records", getCacheName());

        final int count;

        try {
            count = sql.fetchCount(table(sqlRecordTransformer.getTableName()));

        } catch (Exception e) {
            Loggers.SQL_LOG.error("[IS0121] {}: {}", e.getMessage(), e);
            throw new PersistenceException(e.getMessage(), e);
        }

        Loggers.SQL_LOG.trace("[IS0122] SQL store: Counted {} {} records", count, getCacheName());

        return count;
    }


    @Override
    public void clear() {

        Loggers.SQL_LOG.trace("[IS0123] SQL store: Clearing {} records", getCacheName());

        int numDeleted;

        try {
            numDeleted = sql.deleteFrom(table(sqlRecordTransformer.getTableName())).execute();

        } catch (Exception e) {
            Loggers.SQL_LOG.error("[IS0124] {}: {}", e.getMessage(), e);
            throw new PersistenceException(e.getMessage(), e);
        }

        Loggers.SQL_LOG.info("[IS0125] SQL store: Cleared {} {} records", numDeleted, sqlRecordTransformer.getTableName());
    }
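

    // For orientation: the reaper configured in start() bulk-deletes
    // expired rows, which with a hypothetical "exp" timestamp column
    // amounts to roughly
    //
    //   DELETE FROM users WHERE exp > 0 AND exp <= ?
    //
    // The paged variant caps each round at
    // config.getExpiredQueryPageLimit() rows; the actual expiration
    // conditions are derived from the SQL record transformer.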


    @Override
    public void purge(final Executor executor, final PurgeListener<? super K> purgeListener) {

        // Should never be called in the presence of purge(Executor,ExpirationPurgeListener)

        Loggers.SQL_LOG.trace("[IS0126] SQL store: Purging {} cache entries", getCacheName());

        try (Timer.Context timerCtx = timers.purgeTimer.time()) {
            executor.execute(() -> {
                try {
                    reaper.purgeWithKeyListener(purgeListener);
                } catch (Exception e) {
                    Loggers.SQL_LOG.warn("[IS0153] Purge failed, will retry on next run: {}", e.getMessage(), e);
                }
            });
        } catch (Exception e) {
            Loggers.SQL_LOG.error("[IS0127] Failed to submit purge task: {}", e.getMessage(), e);
        }
    }


    @Override
    public void purge(final Executor executor, final ExpirationPurgeListener<K,V> purgeListener) {

        Loggers.SQL_LOG.trace("[IS0150] SQL store: Purging {} cache entries", getCacheName());

        try (Timer.Context timerCtx = timers.purgeTimer.time()) {
            executor.execute(() -> {
                try {
                    reaper.purgeWithEntryListener(purgeListener);
                } catch (Exception e) {
                    Loggers.SQL_LOG.warn("[IS0152] Purge failed, will retry on next run: {}", e.getMessage(), e);
                }
            });
        } catch (Exception e) {
            Loggers.SQL_LOG.error("[IS0151] Failed to submit purge task: {}", e.getMessage(), e);
        }
    }
}