IOLoops

object IOLoops

Contains training loops and helpers around them

The two training loops implemented here are `epochs` and `withSWA` (see Concrete methods below):

class Object
trait Matchable
class Any
IOLoops.type

Type members

Classlikes

case class TrainingLoopContext(epoch: Int, lastValidationLoss: Option[Double], minValidationLoss: Option[Double])

Companion: object TrainingLoopContext

Value members

Concrete methods

def epochs[I, M <: GenericModule[I, Variable] : Load, LRState, BatchStreamState, BatchStreamBuffers](model: SupervisedModel[I, M], optimizerFactory: Seq[(STen, PTag)] => Optimizer, trainBatchesOverEpoch: TrainingLoopContext => BatchStream[I, BatchStreamState, BatchStreamBuffers], validationBatchesOverEpoch: Option[TrainingLoopContext => BatchStream[I, BatchStreamState, BatchStreamBuffers]], epochs: Int, trainingCallback: TrainingCallback, validationCallback: ValidationCallback, checkpointState: Option[(SimpleLoopState, LRState) => IO[Unit]], validationFrequency: Int, logger: Option[Logger], returnMinValidationLossModel: Seq[Int], learningRateSchedule: LearningRateSchedule[LRState], prefetch: Boolean, dataParallelModels: Seq[SupervisedModel[I, M]], initState: Option[SimpleLoopState], accumulateGradientOverNBatches: Int, learningRateScheduleInitState: Option[LRState], printOptimizerAllocations: Boolean, validationLossExponentialSmoothingFactor: Double): IO[(Int, SupervisedModel[I, M], List[(Int, Double, Option[(Double, Double)])], LRState, SimpleLoopState)]
def forwardAndDiscardBatchStream[I, M <: GenericModule[I, Variable], S, C](batchStream: BatchStream[I, S, C], buffers: Device => Resource[IO, C], model: M & GenericModule[I, Variable]): IO[Unit]
def oneEpoch[I, M <: GenericModule[I, Variable], S, C](epochCount: Long, trainingCallback: TrainingCallback, model: ModelWithOptimizer[I, M], trainBatches: BatchStream[I, S, C], logger: Option[Logger], learningRateScheduleFactor: Double, prefetch: Boolean, accumulateGradientOverNBatches: Int): IO[Double]
def parallelRunBatchStream[I, O, M <: GenericModule[I, O], S, O2 : Movable, C](batchStream: BatchStream[I, S, C], bufferPerModel: Resource[IO, List[(Device, C)]], models: Seq[M & GenericModule[I, O]])(tx: ((I, STen), O) => O2)(implicit evidence$1: Movable[O2], scope: Scope): IO[Vector[O2]]
def runBatchStream[I, M <: GenericModule[I, Variable], S, C](batchStream: BatchStream[I, S, C], buffers: Resource[IO, C], model: M & GenericModule[I, Variable])(implicit scope: Scope): IO[List[STen]]
def validationOneEpoch[I, M <: GenericModule[I, Variable], S, C](model: SupervisedModel[I, M], validationBatches: BatchStream[I, S, C], validationCallback: ValidationCallback, logger: Option[Logger], epochCount: Long): IO[Double]
def withSWA[I, M <: GenericModule[I, Variable] : Load, LRState, LRStateSWA, BatchStreamState, BatchStreamBuffers](model: SupervisedModel[I, M], optimizerFactory: Seq[(STen, PTag)] => Optimizer, trainBatchesOverEpoch: TrainingLoopContext => BatchStream[I, BatchStreamState, BatchStreamBuffers], warmupEpochs: Int, swaEpochs: Int, validationBatchesOverEpoch: Option[TrainingLoopContext => BatchStream[I, BatchStreamState, BatchStreamBuffers]], trainingCallback: TrainingCallback, validationCallback: ValidationCallback, checkpointState: Option[(SimpleThenSWALoopState, Either[LRState, LRStateSWA]) => IO[Unit]], logger: Option[Logger], returnMinValidationLossModel: Seq[Int], learningRateSchedule: LearningRateSchedule[LRState], swaLearningRateSchedule: SWALearningRateSchedule[LRStateSWA], prefetch: Boolean, dataParallelModels: Seq[SupervisedModel[I, M]], initState: Option[SimpleThenSWALoopState], accumulateGradientOverNBatches: Int, learningRateScheduleInitState: Option[LRState], swaLearningRateScheduleInitState: Option[LRStateSWA], swaForwardPassAfterTraining: Boolean, validationLossExponentialSmoothingFactor: Double): IO[(Int, SupervisedModel[I, M], List[(Int, Double, Option[(Double, Double)])], SupervisedModel[I, M])]