org.apache.spark.sql.execution.streaming
object CompactibleFileStreamLog
Companion class CompactibleFileStreamLog
Value Members
- final def !=(arg0: Any): Boolean
  - Definition Classes: AnyRef → Any
- final def ##(): Int
  - Definition Classes: AnyRef → Any
- final def ==(arg0: Any): Boolean
  - Definition Classes: AnyRef → Any
- val COMPACT_FILE_SUFFIX: String
- val COMPACT_LATENCY_WARN_THRESHOLD_MS: Int
- final def asInstanceOf[T0]: T0
  - Definition Classes: Any
- def clone(): AnyRef
  - Attributes: protected[lang]
  - Definition Classes: AnyRef
  - Annotations: @throws( ... ) @native()
- def deriveCompactInterval(defaultInterval: Int, latestCompactBatchId: Int): Int
  Derives a compact interval from the latest compact batch id and a default compact interval.
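  The derivation rule is not spelled out in this listing. The sketch below is only an illustration of one constraint any derived interval has to satisfy under the compaction convention shown for isCompactionBatch further down (latestCompactBatchId must remain a compaction batch); it is not the Spark implementation and the helper name is hypothetical.

  ```scala
  // Illustrative sketch only, not the Spark implementation.
  // Assumption: the derived interval must keep `latestCompactBatchId` a compaction
  // batch, i.e. (latestCompactBatchId + 1) % interval == 0 (see isCompactionBatch below).
  def deriveCompactIntervalSketch(defaultInterval: Int, latestCompactBatchId: Int): Int = {
    val numBatches = latestCompactBatchId + 1
    // Smallest candidate >= defaultInterval that divides (latestCompactBatchId + 1);
    // fall back to numBatches itself when defaultInterval exceeds numBatches.
    (defaultInterval to numBatches).find(numBatches % _ == 0).getOrElse(numBatches)
  }
  ```

  For example, with defaultInterval = 3 and latestCompactBatchId = 8 (numBatches = 9) the sketch returns 3; with defaultInterval = 4 and latestCompactBatchId = 9 (numBatches = 10) it returns 5.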
- final def eq(arg0: AnyRef): Boolean
  - Definition Classes: AnyRef
- def equals(arg0: Any): Boolean
  - Definition Classes: AnyRef → Any
- def finalize(): Unit
  - Attributes: protected[lang]
  - Definition Classes: AnyRef
  - Annotations: @throws( classOf[java.lang.Throwable] )
- def getAllValidBatches(batchId: Long, compactInterval: Long): Seq[Long]
  Returns all necessary logs before batchId (inclusive). If batchId is a compaction batch, just return itself. Otherwise, it will find the previous compaction batch and return all batches between it and batchId.
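  A minimal sketch of the arithmetic described above, assuming the (batchId + 1) % compactInterval == 0 compaction convention illustrated for isCompactionBatch below; the helper name is hypothetical and this is not the Spark implementation.

  ```scala
  // Sketch only. Assumes compaction batches satisfy (batchId + 1) % compactInterval == 0.
  def getAllValidBatchesSketch(batchId: Long, compactInterval: Long): Seq[Long] = {
    if ((batchId + 1) % compactInterval == 0) {
      // A compaction batch already summarizes everything before it.
      Seq(batchId)
    } else {
      // Otherwise include everything since the previous compaction batch (or batch 0).
      val previousCompaction = batchId - (batchId + 1) % compactInterval
      math.max(previousCompaction, 0L) to batchId
    }
  }
  ```

  With compactInterval = 3, batch 8 is a compaction batch and yields Seq(8), while batch 7 yields Seq(5, 6, 7) (5 being the previous compaction batch).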
- def getBatchIdFromFileName(fileName: String): Long
- final def getClass(): Class[_]
  - Definition Classes: AnyRef → Any
  - Annotations: @native()
- def getValidBatchesBeforeCompactionBatch(compactionBatchId: Long, compactInterval: Int): Seq[Long]
  Returns all valid batches before the specified compactionBatchId. They contain all logs we need to do a new compaction. E.g., if compactInterval is 3 and compactionBatchId is 5, this method returns Seq(2, 3, 4) (note that it includes the previous compaction batch 2).
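  A sketch reproducing the example above (hypothetical helper name, not the Spark implementation):

  ```scala
  // Sketch only: everything since (and including) the previous compaction batch,
  // clamped at batch 0, up to but excluding compactionBatchId.
  def getValidBatchesBeforeCompactionBatchSketch(
      compactionBatchId: Long,
      compactInterval: Int): Seq[Long] = {
    math.max(compactionBatchId - compactInterval, 0L) until compactionBatchId
  }
  ```

  With compactInterval = 3 and compactionBatchId = 5 this gives Seq(2, 3, 4), matching the example; for the very first compaction batch (id 2) it gives Seq(0, 1).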
- def hashCode(): Int
  - Definition Classes: AnyRef → Any
  - Annotations: @native()
- def isCompactionBatch(batchId: Long, compactInterval: Int): Boolean
  Returns whether this is a compaction batch. FileStreamSinkLog will compact old logs every compactInterval commits. E.g., if compactInterval is 3, then 2, 5, 8, ... are all compaction batches.
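  The 2, 5, 8 example corresponds to the following arithmetic; a sketch of the convention with a hypothetical name, since the listing does not show the method body:

  ```scala
  // Sketch: with compactInterval = 3, batches 2, 5, 8, ... satisfy this test.
  def isCompactionBatchSketch(batchId: Long, compactInterval: Int): Boolean =
    (batchId + 1) % compactInterval == 0
  ```

  isCompactionBatchSketch(5, 3) and isCompactionBatchSketch(8, 3) are true; isCompactionBatchSketch(4, 3) is false.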
- final def isInstanceOf[T0]: Boolean
  - Definition Classes: Any
- final def ne(arg0: AnyRef): Boolean
  - Definition Classes: AnyRef
- def nextCompactionBatchId(batchId: Long, compactInterval: Long): Long
  Returns the next compaction batch id after batchId.
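  Under the same convention, a sketch of the next-compaction-batch arithmetic (hypothetical name, not necessarily the actual implementation):

  ```scala
  // Sketch: the smallest id > batchId with (id + 1) % compactInterval == 0.
  def nextCompactionBatchIdSketch(batchId: Long, compactInterval: Long): Long =
    (batchId + compactInterval + 1) / compactInterval * compactInterval - 1
  ```

  With compactInterval = 3, the next compaction batch after batch 5 is 8, and after batch 8 it is 11.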
- final def notify(): Unit
  - Definition Classes: AnyRef
  - Annotations: @native()
- final def notifyAll(): Unit
  - Definition Classes: AnyRef
  - Annotations: @native()
- final def synchronized[T0](arg0: ⇒ T0): T0
  - Definition Classes: AnyRef
- def toString(): String
  - Definition Classes: AnyRef → Any
- final def wait(): Unit
  - Definition Classes: AnyRef
  - Annotations: @throws( ... )
- final def wait(arg0: Long, arg1: Int): Unit
  - Definition Classes: AnyRef
  - Annotations: @throws( ... )
- final def wait(arg0: Long): Unit
  - Definition Classes: AnyRef
  - Annotations: @throws( ... ) @native()