package com.nimbusds.infinispan.persistence.sql;


import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.codahale.metrics.health.HealthCheckRegistry;
import com.nimbusds.common.monitor.MonitorRegistries;
import com.nimbusds.infinispan.persistence.common.InfinispanEntry;
import com.nimbusds.infinispan.persistence.common.InfinispanStore;
import com.nimbusds.infinispan.persistence.common.InternalMetadataBuilder;
import com.nimbusds.infinispan.persistence.common.query.QueryExecutor;
import com.nimbusds.infinispan.persistence.sql.config.SQLStoreConfiguration;
import com.nimbusds.infinispan.persistence.sql.query.SQLQueryExecutor;
import com.nimbusds.infinispan.persistence.sql.query.SQLQueryExecutorInitContext;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import io.reactivex.rxjava3.core.Flowable;
import net.jcip.annotations.ThreadSafe;
import org.infinispan.commons.configuration.ConfiguredBy;
import org.infinispan.commons.persistence.Store;
import org.infinispan.metadata.impl.PrivateMetadata;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.MarshallableEntryFactory;
import org.infinispan.persistence.spi.PersistenceException;
import org.jooq.DSLContext;
import org.jooq.Field;
import org.jooq.Query;
import org.jooq.SQLDialect;
import org.jooq.conf.RenderNameStyle;
import org.jooq.conf.Settings;
import org.jooq.impl.DSL;
import org.kohsuke.MetaInfServices;
import org.reactivestreams.Publisher;

import javax.sql.DataSource;
import java.time.Instant;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiConsumer;
import java.util.function.Predicate;

import static org.jooq.impl.DSL.table;
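
/*
 * A minimal configuration sketch for wiring this store into an Infinispan
 * cache. The SQLStoreConfigurationBuilder, the builder method names and the
 * property values below are illustrative assumptions, not part of this file:
 *
 *     ConfigurationBuilder builder = new ConfigurationBuilder();
 *     builder.persistence()
 *            .addStore(SQLStoreConfigurationBuilder.class)
 *            .recordTransformerClass(TokenRecordTransformer.class)
 *            .addProperty("jdbcUrl", "jdbc:h2:mem:test")
 *            .addProperty("username", "sa");
 */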

/**
 * SQL store for Infinispan caches and maps.
 */
@ThreadSafe
@MetaInfServices
@ConfiguredBy(SQLStoreConfiguration.class)
@Store(shared = true)
public class SQLStore<K,V> extends InfinispanStore<K,V> {


    /**
     * The supported databases.
     */
    public static final List<SQLDialect> SUPPORTED_DATABASES = List.of(
        SQLDialect.H2, SQLDialect.MYSQL, SQLDialect.POSTGRES_9_5, SQLDialect.SQLSERVER2016, SQLDialect.ORACLE
    );


    /**
     * The SQL store configuration.
     */
    private SQLStoreConfiguration config;


    /**
     * Enables sharing of the Hikari SQL data sources.
     */
    private static final DataSources SHARED_DATA_SOURCES = new DataSources();


    /**
     * The Hikari SQL data source (with connection pool).
     */
    private HikariDataSource dataSource;


    /**
     * Wraps the SQL data source with jOOQ.
     * See http://stackoverflow.com/a/31389342/429425
     */
    private DSLContext sql;


    /**
     * The SQL record transformer (to / from Infinispan entries).
     */
    private SQLRecordTransformer<K,V> sqlRecordTransformer;


    /**
     * The optional SQL query executor.
     */
    private SQLQueryExecutor<K,V> sqlQueryExecutor;


    /**
     * The marshallable Infinispan entry factory.
     */
    private MarshallableEntryFactory<K,V> marshallableEntryFactory;


    /**
     * Purges expired entries found in the SQL store, as indicated by
     * their persisted metadata (optional, may be ignored / not stored).
     */
    private ExpiredEntryReaper<K,V> reaper;


    /**
     * SQL operation timers.
     */
    private SQLTimers timers;


    /**
     * jOOQ query fixes.
     */
    private JOOQFixes jooqFixes;


    /**
     * Loads and instantiates an SQL record transformer of the specified
     * class.
     *
     * @param clazz The class. Must not be {@code null}.
     *
     * @return The SQL record transformer.
     */
    @SuppressWarnings("unchecked")
    private SQLRecordTransformer<K,V> loadRecordTransformerClass(final Class<?> clazz) {

        try {
            Class<SQLRecordTransformer<K,V>> genClazz = (Class<SQLRecordTransformer<K,V>>)clazz;
            return genClazz.getDeclaredConstructor().newInstance();
        } catch (Exception e) {
            throw new PersistenceException("Couldn't load SQL record transformer class: " + e.getMessage(), e);
        }
    }


    /**
     * Loads and instantiates an SQL query executor of the specified
     * class.
     *
     * @param clazz The class. Must not be {@code null}.
     *
     * @return The SQL query executor.
     */
    @SuppressWarnings("unchecked")
    private SQLQueryExecutor<K,V> loadQueryExecutorClass(final Class<?> clazz) {

        try {
            Class<SQLQueryExecutor<K,V>> genClazz = (Class<SQLQueryExecutor<K,V>>)clazz;
            return genClazz.getDeclaredConstructor().newInstance();
        } catch (Exception e) {
            throw new PersistenceException("Couldn't load SQL query executor class: " + e.getMessage(), e);
        }
    }
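
    /*
     * Both loaders above instantiate the configured class reflectively, so a
     * transformer or executor must expose a public no-arg constructor. A
     * hypothetical transformer (class name and type parameters are
     * illustrative; the listed methods are the ones this store invokes):
     *
     *     public class TokenRecordTransformer implements SQLRecordTransformer<String, Token> {
     *
     *         public TokenRecordTransformer() { } // required no-arg constructor
     *
     *         // ... implement getTableName(), getCreateTableStatement(),
     *         // resolveSelectionConditions(key), toSQLRecord(entry),
     *         // toInfinispanEntry(record), getKeyColumnsForExpiredEntryReaper()
     *     }
     */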

    /**
     * Returns the SQL store configuration.
     *
     * @return The SQL store configuration, {@code null} if not
     *         initialised.
     */
    public SQLStoreConfiguration getConfiguration() {

        return config;
    }


    /**
     * Returns the underlying SQL data source.
     *
     * @return The underlying SQL data source, {@code null} if not
     *         initialised.
     */
    public HikariDataSource getDataSource() {

        return dataSource;
    }


    @Override
    public void init(final InitializationContext ctx) {

        // This method will be invoked by the PersistenceManager during initialization. The InitializationContext
        // contains:
        // - this CacheLoader's configuration
        // - the cache to which this loader is applied; your loader might want to use the cache's name to
        //   construct cache-specific identifiers
        // - the StreamingMarshaller that needs to be used to marshall / unmarshall the entries
        // - a TimeService which the loader can use to determine expired entries
        // - a ByteBufferFactory which needs to be used to construct ByteBuffers
        // - a MarshalledEntryFactory which needs to be used to construct entries from the data retrieved by
        //   the loader

        super.init(ctx);

        this.config = ctx.getConfiguration();

        Loggers.MAIN_LOG.info("[IS0100] SQL store: Infinispan cache store configuration for {}:", getCacheName());
        config.log();

        Loggers.MAIN_LOG.info("[IS0140] SQL store: Expiration thread wake up interval for cache {}: {}", getCacheName(),
            ctx.getCache().getCacheConfiguration().expiration().wakeUpInterval());

        // Load and initialise the SQL record transformer
        Loggers.MAIN_LOG.debug("[IS0101] Loading SQL record transformer class {} for cache {}...",
            config.getRecordTransformerClass(),
            getCacheName());

        sqlRecordTransformer = loadRecordTransformerClass(config.getRecordTransformerClass());
        sqlRecordTransformer.init(() -> config.getSQLDialect());

        jooqFixes = new JOOQFixes(config.getSQLDialect());

        // Load and initialise the optional SQL query executor
        if (config.getQueryExecutorClass() != null) {
            Loggers.MAIN_LOG.debug("[IS0201] Loading optional SQL query executor class {} for cache {}...",
                config.getQueryExecutorClass(),
                getCacheName());

            sqlQueryExecutor = loadQueryExecutorClass(config.getQueryExecutorClass());

            sqlQueryExecutor.init(new SQLQueryExecutorInitContext<>() {
                @Override
                public DataSource getDataSource() {
                    // Note: the data source is created in start(), which runs
                    // after init(), so this returns the live field value
                    return dataSource;
                }


                @Override
                public SQLRecordTransformer<K, V> getSQLRecordTransformer() {
                    return sqlRecordTransformer;
                }


                @Override
                public SQLDialect getSQLDialect() {
                    return config.getSQLDialect();
                }
            });
        }

        marshallableEntryFactory = ctx.getMarshallableEntryFactory();

        timers = new SQLTimers(ctx.getCache().getName() + ".");

        Loggers.MAIN_LOG.info("[IS0102] Initialized SQL external store for cache {} with table {}",
            getCacheName(),
            sqlRecordTransformer.getTableName());
    }


    private RetrievedSQLRecord wrap(final org.jooq.Record record) {
        // Prevent retrieval exceptions caused by Oracle's internal
        // conversion of all table column names to upper case
        final boolean fieldsToUpperCase = SQLDialect.ORACLE.equals(config.getSQLDialect());
        return new RetrievedSQLRecordImpl(record, fieldsToUpperCase);
    }


    /**
     * Returns the underlying SQL record transformer.
     *
     * @return The SQL record transformer, {@code null} if not initialised.
     */
    public SQLRecordTransformer<K, V> getSQLRecordTransformer() {
        return sqlRecordTransformer;
    }


    @Override
    public QueryExecutor<K, V> getQueryExecutor() {

        return sqlQueryExecutor;
    }
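
    /*
     * The properties passed to HikariConfig below are the standard HikariCP
     * settings; an illustrative subset (values are hypothetical):
     *
     *     jdbcUrl         = jdbc:h2:mem:test
     *     username        = sa
     *     password        =
     *     maximumPoolSize = 10
     *
     * Non-Hikari and blank properties are stripped first, and the pool name
     * defaults to the cache name.
     */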
    /**
     * Starts the Hikari data source using the existing configuration.
     *
     * @return The data source.
     */
    private HikariDataSource startDataSource() {

        Properties hikariProps = HikariConfigUtils.removeNonHikariProperties(config.properties());
        hikariProps = HikariConfigUtils.removeBlankProperties(hikariProps);
        HikariPoolName poolName = HikariPoolName.setDefaultPoolName(hikariProps, getCacheName());

        var hikariConfig = new HikariConfig(hikariProps);

        MetricRegistry metricRegistry = MonitorRegistries.getMetricRegistry();
        if (HikariConfigUtils.metricsAlreadyRegistered(poolName, metricRegistry)) {
            Loggers.MAIN_LOG.warn("[IS0130] SQL store: Couldn't register Dropwizard metrics: Existing registered metrics for " + getCacheName());
        } else {
            hikariConfig.setMetricRegistry(metricRegistry);
        }

        HealthCheckRegistry healthCheckRegistry = MonitorRegistries.getHealthCheckRegistry();
        if (HikariConfigUtils.healthChecksAlreadyRegistered(poolName, healthCheckRegistry)) {
            Loggers.MAIN_LOG.warn("[IS0131] SQL store: Couldn't register Dropwizard health checks: Existing registered health checks for " + getCacheName());
        } else {
            hikariConfig.setHealthCheckRegistry(healthCheckRegistry);
        }

        return new HikariDataSource(hikariConfig);
    }


    @Override
    public void start() {

        // This method will be invoked by the PersistenceManager to start the CacheLoader. At this stage
        // configuration is complete and the loader can perform operations such as opening a connection to the
        // external storage, initialising internal data structures, etc.

        if (config.getConnectionPool() == null) {
            // Using own data source
            dataSource = startDataSource();
            SHARED_DATA_SOURCES.put(getCacheName(), dataSource);
        } else {
            // Using shared data source
            dataSource = SHARED_DATA_SOURCES.get(config.getConnectionPool());
            if (dataSource == null) {
                // Defer start until the connection pool becomes available
                SHARED_DATA_SOURCES.deferStart(config.getConnectionPool(), this);
                return;
            }
        }

        Loggers.MAIN_LOG.info("[IS0143] SQL store: Transaction isolation for cache {}: {}",
            getCacheName(), TXIsolation.inspect(dataSource));

        // Init jOOQ SQL context
        var jooqSettings = new Settings();
        if (SQLDialect.H2.equals(config.getSQLDialect())) {
            // Quoted column names occasionally cause problems in H2
            jooqSettings.setRenderNameStyle(RenderNameStyle.AS_IS);
        }
        sql = DSL.using(dataSource, config.getSQLDialect(), jooqSettings);

        if (config.createTableIfMissing()) {
            try {
                Loggers.MAIN_LOG.info("[IS0136] SQL store: Executing create table {} (if missing) for cache {}", sqlRecordTransformer.getTableName(), getCacheName());
                int rows = sql.execute(sqlRecordTransformer.getCreateTableStatement());
                if (rows > 0) {
                    Loggers.MAIN_LOG.info("[IS0129] SQL store: Created table {} for cache {}", sqlRecordTransformer.getTableName(), getCacheName());
                } else {
                    Loggers.MAIN_LOG.info("[IS0129] SQL store: Create table {} (if missing) for cache {} returned {} changed rows", sqlRecordTransformer.getTableName(), getCacheName(), rows);
                }

            } catch (Exception e) {
                String msg = "[IS0103] SQL store: Create table failed, {}: " + e.getMessage();
                if (config.createTableIgnoreErrors()) {
                    Loggers.MAIN_LOG.warn(msg, "continuing");
                } else {
                    Loggers.MAIN_LOG.fatal(msg, "aborting", e);
                    throw new PersistenceException(e.getMessage(), e);
                }
            }
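            // Alter table? A table transformer may evolve an existing schema;
            // the statements it returns are executed verbatim. A hypothetical
            // example of such a statement (table and column are illustrative):
            //
            //     ALTER TABLE tokens ADD expires_at TIMESTAMP NULL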
            if (sqlRecordTransformer instanceof SQLTableTransformer) {
                Loggers.MAIN_LOG.info("[IS0133] SQL store: Found table transformer");
                List<String> transformQueries = ((SQLTableTransformer)sqlRecordTransformer)
                    .getTransformTableStatements(
                        SQLTableUtils.getColumnNames(table(sqlRecordTransformer.getTableName()), sql)
                    );
                if (transformQueries != null) {
                    for (String query: transformQueries) {
                        Loggers.MAIN_LOG.info("[IS0134] SQL store: Executing table transform for cache {}: {}", getCacheName(), query);
                        sql.execute(query);
                    }
                }
            }

        } else {
            Loggers.MAIN_LOG.info("[IS0132] SQL store: Skipped create table (if missing) step");
        }

        Loggers.MAIN_LOG.info("[IS0104] Started SQL external store connector for cache {} with table {}", getCacheName(), sqlRecordTransformer.getTableName());

        if (sqlRecordTransformer.getKeyColumnsForExpiredEntryReaper() != null) {
            reaper = new ExpiredEntryPagedReaper<>(
                marshallableEntryFactory,
                sql,
                sqlRecordTransformer,
                this::wrap,
                config.getExpiredQueryPageLimit(),
                timers.deleteTimer);
        } else {
            reaper = new ExpiredEntryReaper<>(
                marshallableEntryFactory,
                sql,
                sqlRecordTransformer,
                this::wrap,
                timers.deleteTimer);
        }
    }


    @Override
    public void stop() {

        super.stop();

        SHARED_DATA_SOURCES.remove(getCacheName());

        if (dataSource != null && config.getConnectionPool() == null) {
            dataSource.close();
        }

        Loggers.MAIN_LOG.info("[IS0105] Stopped SQL store connector for cache {}", getCacheName());
    }


    @SuppressWarnings("unchecked")
    private K resolveKey(final Object key) {

        if (key instanceof byte[]) {
            throw new PersistenceException("Cannot resolve " + getCacheName() + " cache key from byte[], enable compatibility mode");
        }

        return (K)key;
    }


    @Override
    public boolean contains(final Object key) {

        // This method will be invoked by the PersistenceManager to determine if the loader contains the
        // specified key. The implementation should be as fast as possible, e.g. it should strive to transfer
        // the least amount of data possible from the external storage to perform the check. Also, if possible,
        // make sure the field is indexed on the external storage so that its existence can be determined as
        // quickly as possible.
        //
        // Note that keys will be in the cache's native format, which means that if the cache is being used by
        // a remoting protocol such as HotRod or REST and compatibility mode has not been enabled, then they
        // will be encoded in a byte[].

        Loggers.SQL_LOG.trace("[IS0106] SQL store: Checking {} cache key {}", getCacheName(), key);

        try (Timer.Context timerCtx = timers.loadTimer.time()) {
            return sql.selectOne()
                .from(table(sqlRecordTransformer.getTableName()))
                .where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
                .fetchOne() != null;
        } catch (Exception e) {
            Loggers.SQL_LOG.error("[IS0107] {}: {}", e.getMessage(), e);
            throw new PersistenceException(e.getMessage(), e);
        }
    }
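
    /*
     * The existence check above selects a constant instead of the record
     * columns, so only a minimal result set travels over the wire. Roughly
     * the rendered SQL (illustrative, assuming a single key column id):
     *
     *     SELECT 1 FROM tokens WHERE id = ?
     */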

    @Override
    public MarshallableEntry<K, V> loadEntry(final Object key) {

        // Note: this SPI description predates the MarshallableEntry API and
        // refers to the legacy MarshalledEntry factory.
        //
        // Fetches an entry from the storage using the specified key. The CacheLoader should retrieve from the
        // external storage all data that is needed to reconstruct the entry in memory, i.e. the value and
        // optionally the metadata. This method needs to return a MarshalledEntry which can be constructed as
        // follows:
        //
        // ctx.getMarshalledEntryFactory().new MarshalledEntry(key, value, metadata);
        //
        // If the entry does not exist or has expired, this method should return null.
        // If an error occurs while retrieving data from the external storage, this method should throw a
        // PersistenceException.
        //
        // Note that keys and values will be in the cache's native format, which means that if the cache is
        // being used by a remoting protocol such as HotRod or REST and compatibility mode has not been enabled,
        // then they will be encoded in a byte[]. If the loader needs to have knowledge of the key / value data
        // beyond their binary representation, then it needs access to the key's and value's classes and the
        // marshaller used to encode them.

        Loggers.SQL_LOG.trace("[IS0108] SQL store: Loading {} cache entry with key {}", getCacheName(), key);

        final org.jooq.Record record;

        try (Timer.Context timerCtx = timers.loadTimer.time()) {
            record = sql.selectFrom(table(sqlRecordTransformer.getTableName()))
                .where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
                .fetchOne();
        } catch (Exception e) {
            Loggers.SQL_LOG.error("[IS0109] {}: {}", e.getMessage(), e);
            throw new PersistenceException(e.getMessage(), e);
        }

        if (record == null) {
            // Not found
            Loggers.SQL_LOG.trace("[IS0110] SQL store: Record with key {} not found", key);
            return null;
        }

        if (Loggers.SQL_LOG.isTraceEnabled()) {
            Loggers.SQL_LOG.trace("[IS0111] SQL store: Retrieved record: {}", record);
        }

        // Transform SQL record to Infinispan entry
        InfinispanEntry<K,V> infinispanEntry;
        try {
            infinispanEntry = sqlRecordTransformer.toInfinispanEntry(wrap(record));
        } catch (Exception e) {
            Loggers.SQL_LOG.error("[IS0137] SQL store: Error transforming SQL record for key {}: {}", key, e.getMessage());
            throw e;
        }

        if (infinispanEntry.isExpired()) {
            Loggers.SQL_LOG.trace("[IS0135] SQL store: Record with key {} expired", key);
            return null;
        }

        return marshallableEntryFactory.create(
            infinispanEntry.getKey(),
            infinispanEntry.getValue(),
            infinispanEntry.getMetadata(),
            PrivateMetadata.empty(),
            infinispanEntry.created(),
            infinispanEntry.lastUsed()
        );
    }


    @Override
    public boolean delete(final Object key) {

        // The CacheWriter should remove from the external storage the entry identified by the specified key.
        // Note that keys will be in the cache's native format, which means that if the cache is being used by
        // a remoting protocol such as HotRod or REST and compatibility mode has not been enabled, then they
        // will be encoded in a byte[].

        Loggers.SQL_LOG.trace("[IS0112] SQL store: Deleting {} cache entry with key {}", getCacheName(), key);

        int deletedRows;

        try (Timer.Context timerCtx = timers.deleteTimer.time()) {
            deletedRows = sql.deleteFrom(table(sqlRecordTransformer.getTableName()))
                .where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
                .execute();
        } catch (Exception e) {
            Loggers.SQL_LOG.error("[IS0113] {}: {}", e.getMessage(), e);
            throw new PersistenceException(e.getMessage(), e);
        }

        Loggers.SQL_LOG.trace("[IS0113] SQL store: Deleted {} record(s) with key {}", deletedRows, key);

        if (deletedRows == 1) {
            return true;
        } else if (deletedRows == 0) {
            return false;
        } else {
            Loggers.SQL_LOG.error("[IS0114] Too many deleted rows ({}) for key {}", deletedRows, key);
            throw new PersistenceException("Too many deleted rows for key " + key);
        }
    }


    @Override
    public void write(final MarshallableEntry<? extends K, ? extends V> entry) {

        Loggers.SQL_LOG.trace("[IS0115] SQL store: Writing {} cache entry {}", getCacheName(), entry);

        try (Timer.Context timerCtx = timers.writeTimer.time()) {
            SQLRecord sqlRecord = sqlRecordTransformer.toSQLRecord(
                new InfinispanEntry<>(
                    entry.getKey(),
                    entry.getValue(),
                    new InternalMetadataBuilder()
                        .created(entry.created())
                        .lastUsed(entry.lastUsed())
                        .lifespan(entry.getMetadata() != null ? entry.getMetadata().lifespan() : -1L)
                        .maxIdle(entry.getMetadata() != null ? entry.getMetadata().maxIdle() : -1L)
                        .build()));

            // Oracle (N)CLOB chunking
            // https://stackoverflow.com/a/63957679/429425
            final AtomicReference<Map<String, List<String>>> oracleClobChunksMap = new AtomicReference<>();
            sqlRecord = jooqFixes.prepareOracleWriteCLOB(sqlRecord, oracleClobChunksMap);

            Query query = resolveSQLQueryForWrite(sqlRecord);

            int rows;
            if (oracleClobChunksMap.get() != null && ! oracleClobChunksMap.get().isEmpty()) {
                rows = sql.execute(jooqFixes.completeOracleWriteClob(query.toString(), oracleClobChunksMap.get()));
            } else {
                rows = query.execute();
            }

            if (rows != 1) {

                if (SQLDialect.MYSQL.equals(config.getSQLDialect()) && rows == 2) {
                    // MySQL reports an insert that updated an existing row as
                    // 2 affected rows
                    return;
                }

                Loggers.SQL_LOG.error("[IS0116] SQL insert / update for key {} in table {} failed: Rows {}",
                    entry.getKey(), sqlRecordTransformer.getTableName(), rows);
                throw new PersistenceException("(Synthetic) SQL MERGE failed: Rows " + rows);
            }

        } catch (Exception e) {
            Loggers.SQL_LOG.error("[IS0117] {}: {}", e.getMessage(), e);
            throw new PersistenceException(e.getMessage(), e);
        }
    }


    private Query resolveSQLQueryForWrite(final SQLRecord sqlRecord) {

        if (SQLDialect.POSTGRES_9_5.equals(config.getSQLDialect()) || SQLDialect.ORACLE.equals(config.getSQLDialect())) {
            // Use INSERT with ON CONFLICT
            // https://www.jooq.org/doc/latest/manual/sql-building/sql-statements/insert-statement/insert-on-duplicate-key/
            return sql.insertInto(table(sqlRecordTransformer.getTableName()))
                .columns(sqlRecord.getFields().keySet())
                .values(sqlRecord.getFields().values())
                .onConflict(sqlRecord.getKeyColumns())
                .doUpdate()
                .set(removeKeyFields(sqlRecord.getFields(), sqlRecord.getKeyColumns()));
        } else {
            // Use H2-style MERGE, jOOQ will adapt it for the particular database
            // http://www.jooq.org/doc/3.8/manual/sql-building/sql-statements/merge-statement/
            return sql.mergeInto(table(sqlRecordTransformer.getTableName()), sqlRecord.getFields().keySet())
                .key(sqlRecord.getKeyColumns())
                .values(sqlRecord.getFields().values());
        }
    }
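
    /*
     * Roughly the SQL each branch above renders, for a hypothetical table
     * tokens with key column id and one value column data (illustrative
     * only; on Oracle jOOQ emulates the ON CONFLICT form, typically with a
     * native MERGE):
     *
     *   PostgreSQL / Oracle branch:
     *     INSERT INTO tokens (id, data) VALUES (?, ?)
     *     ON CONFLICT (id) DO UPDATE SET data = ?
     *
     *   H2-style MERGE branch (adapted per dialect):
     *     MERGE INTO tokens (id, data) KEY (id) VALUES (?, ?)
     */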

    private static Map<Field<?>,?> removeKeyFields(final Map<Field<?>,?> fields, final List<Field<?>> keyCols) {

        Map<Field<?>, Object> filtered = new HashMap<>();

        fields.forEach((BiConsumer<Field<?>, Object>) (field, o) -> {
            if (! keyCols.contains(field)) {
                filtered.put(field, o); // Non-key field
            }
        });

        return filtered;
    }


    @Override
    public Publisher<MarshallableEntry<K, V>> entryPublisher(final Predicate<? super K> filter, final boolean fetchValue, final boolean fetchMetadata) {

        Loggers.SQL_LOG.trace("[IS0118] SQL store: Processing key filter for {} cache: fetchValue={} fetchMetadata={}",
            getCacheName(), fetchValue, fetchMetadata);

        final Instant now = Instant.now();

        // The process timer context is opened per subscription and stopped
        // when the flow terminates or is cancelled
        return Flowable.using(timers.processTimer::time,
            ignore -> Flowable.fromIterable(sql.selectFrom(table(sqlRecordTransformer.getTableName())).fetch())
                .map(record -> sqlRecordTransformer.toInfinispanEntry(wrap(record)))
                .filter(infinispanEntry -> filter == null || filter.test(infinispanEntry.getKey()))
                .filter(infinispanEntry -> ! infinispanEntry.isExpired(now))
                .map(infinispanEntry -> marshallableEntryFactory.create(
                    infinispanEntry.getKey(),
                    infinispanEntry.getValue(),
                    infinispanEntry.getMetadata(),
                    PrivateMetadata.empty(),
                    infinispanEntry.created(),
                    infinispanEntry.lastUsed()
                ))
                .doOnError(e -> Loggers.SQL_LOG.error("[IS0119] {}: {}", e.getMessage(), e)),
            Timer.Context::stop);
    }


    @Override
    public int size() {

        // Infinispan code analysis on 8.2 shows that this method is never called in practice, and
        // is not wired to the data / cache container API

        Loggers.SQL_LOG.trace("[IS0120] SQL store: Counting {} records", getCacheName());

        final int count;

        try {
            count = sql.fetchCount(table(sqlRecordTransformer.getTableName()));

        } catch (Exception e) {
            Loggers.SQL_LOG.error("[IS0121] {}: {}", e.getMessage(), e);
            throw new PersistenceException(e.getMessage(), e);
        }

        Loggers.SQL_LOG.trace("[IS0122] SQL store: Counted {} {} records", count, getCacheName());

        return count;
    }


    @Override
    public void clear() {

        Loggers.SQL_LOG.trace("[IS0123] SQL store: Clearing {} records", getCacheName());

        int numDeleted;

        try {
            numDeleted = sql.deleteFrom(table(sqlRecordTransformer.getTableName())).execute();

        } catch (Exception e) {
            Loggers.SQL_LOG.error("[IS0124] {}: {}", e.getMessage(), e);
            throw new PersistenceException(e.getMessage(), e);
        }

        Loggers.SQL_LOG.info("[IS0125] SQL store: Cleared {} {} records", numDeleted, sqlRecordTransformer.getTableName());
    }

    @Override
    public void purge(final Executor executor, final PurgeListener<? super K> purgeListener) {

        // Should never be called in the presence of purge(Executor, ExpirationPurgeListener)

        Loggers.SQL_LOG.trace("[IS0126] SQL store: Purging {} cache entries", getCacheName());

        try (Timer.Context timerCtx = timers.purgeTimer.time()) {
            executor.execute(() -> {
                try {
                    reaper.purgeWithKeyListener(purgeListener);
                } catch (Exception e) {
                    Loggers.SQL_LOG.warn("[IS0153] Purge failed, will retry on next run: {}", e.getMessage(), e);
                }
            });
        } catch (Exception e) {
            Loggers.SQL_LOG.error("[IS0127] Failed to submit purge task: {}", e.getMessage(), e);
        }
    }


    @Override
    public void purge(final Executor executor, final ExpirationPurgeListener<K,V> purgeListener) {

        Loggers.SQL_LOG.trace("[IS0150] SQL store: Purging {} cache entries", getCacheName());

        try (Timer.Context timerCtx = timers.purgeTimer.time()) {
            executor.execute(() -> {
                try {
                    reaper.purgeWithEntryListener(purgeListener);
                } catch (Exception e) {
                    Loggers.SQL_LOG.warn("[IS0152] Purge failed, will retry on next run: {}", e.getMessage(), e);
                }
            });
        } catch (Exception e) {
            Loggers.SQL_LOG.error("[IS0151] Failed to submit purge task: {}", e.getMessage(), e);
        }
    }
}