IOLoops
lamp.data.IOLoops$
object IOLoops
Contains training loops and helpers around them.
The two training loops implemented here are:
- lamp.data.IOLoops.epochs
- lamp.data.IOLoops.withSWA implements Stochastic Weight Averaging
Attributes
- Graph
-
- Supertypes
-
class Object, trait Matchable, class Any
- Self type
-
IOLoops.type
Members list
Type members
Classlikes
case class TrainingLoopContext(epoch: Int, lastValidationLoss: Option[Double], minValidationLoss: Option[Double])
Attributes
- Companion
- object
- Supertypes
-
trait Serializable, trait Product, trait Equals, class Object, trait Matchable, class Any
object TrainingLoopContext
Attributes
- Companion
- class
- Supertypes
-
trait Product, trait Mirror, class Object, trait Matchable, class Any
- Self type
-
TrainingLoopContext.type
Value members
Concrete methods
def epochs[I, M <: GenericModule[I, Variable] : Load, LRState, BatchStreamState, BatchStreamBuffers](model: SupervisedModel[I, M], optimizerFactory: Seq[(STen, PTag)] => Optimizer, trainBatchesOverEpoch: TrainingLoopContext => BatchStream[(I, STen), BatchStreamState, BatchStreamBuffers], validationBatchesOverEpoch: Option[TrainingLoopContext => BatchStream[(I, STen), BatchStreamState, BatchStreamBuffers]], epochs: Int, trainingCallback: TrainingCallback, validationCallback: ValidationCallback, checkpointState: Option[(SimpleLoopState, LRState) => IO[Unit]], validationFrequency: Int, logger: Option[Logger], returnMinValidationLossModel: Seq[Int], learningRateSchedule: LearningRateSchedule[LRState], prefetch: Boolean, dataParallelModels: Seq[SupervisedModel[I, M]], initState: Option[SimpleLoopState], accumulateGradientOverNBatches: Int, learningRateScheduleInitState: Option[LRState], printOptimizerAllocations: Boolean, validationLossExponentialSmoothingFactor: Double): IO[(Int, SupervisedModel[I, M], List[(Int, Double, Option[(Double, Double)])], LRState, SimpleLoopState)]
def forwardAndDiscardBatchStream[I, M <: GenericModule[I, Variable], S, C](batchStream: BatchStream[(I, STen), S, C], buffers: Device => Resource[IO, C], model: M & GenericModule[I, Variable]): IO[Unit]
def oneEpoch[I, M <: GenericModule[I, Variable], S, C](epochCount: Long, trainingCallback: TrainingCallback, model: ModelWithOptimizer[I, M], trainBatches: BatchStream[(I, STen), S, C], logger: Option[Logger], learningRateScheduleFactor: Double, prefetch: Boolean, accumulateGradientOverNBatches: Int): IO[Double]
def parallelRunBatchStream[I, O, M <: GenericModule[I, O], S, O2 : Movable, C](batchStream: BatchStream[(I, STen), S, C], bufferPerModel: Resource[IO, List[(Device, C)]], models: Seq[M & GenericModule[I, O]])(tx: ((I, STen), O) => O2)(implicit evidence$2: Movable[O2], scope: Scope): IO[Vector[O2]]
def runBatchStream[A, B : Movable, M <: GenericModule[A, B], S, C](batchStream: BatchStream[A, S, C], buffers: Resource[IO, C], model: M & GenericModule[A, B])(implicit evidence$1: Movable[B], scope: Scope): IO[Vector[B]]
def validationOneEpoch[I, M <: GenericModule[I, Variable], S, C](model: SupervisedModel[I, M], validationBatches: BatchStream[(I, STen), S, C], validationCallback: ValidationCallback, logger: Option[Logger], epochCount: Long): IO[Double]
def withSWA[I, M <: GenericModule[I, Variable] : Load, LRState, LRStateSWA, BatchStreamState, BatchStreamBuffers](model: SupervisedModel[I, M], optimizerFactory: Seq[(STen, PTag)] => Optimizer, trainBatchesOverEpoch: TrainingLoopContext => BatchStream[(I, STen), BatchStreamState, BatchStreamBuffers], warmupEpochs: Int, swaEpochs: Int, validationBatchesOverEpoch: Option[TrainingLoopContext => BatchStream[(I, STen), BatchStreamState, BatchStreamBuffers]], trainingCallback: TrainingCallback, validationCallback: ValidationCallback, checkpointState: Option[(SimpleThenSWALoopState, Either[LRState, LRStateSWA]) => IO[Unit]], logger: Option[Logger], returnMinValidationLossModel: Seq[Int], learningRateSchedule: LearningRateSchedule[LRState], swaLearningRateSchedule: SWALearningRateSchedule[LRStateSWA], prefetch: Boolean, dataParallelModels: Seq[SupervisedModel[I, M]], initState: Option[SimpleThenSWALoopState], accumulateGradientOverNBatches: Int, learningRateScheduleInitState: Option[LRState], swaLearningRateScheduleInitState: Option[LRStateSWA], swaForwardPassAfterTraining: Boolean, validationLossExponentialSmoothingFactor: Double): IO[(Int, SupervisedModel[I, M], List[(Int, Double, Option[(Double, Double)])], SupervisedModel[I, M])]
In this article