public interface NeuralNetwork extends Serializable, Cloneable, NeuralNetEpochListener
Modifier and Type | Interface and Description |
---|---|
static class |
NeuralNetwork.LossFunction
Which loss function to use
|
static class |
NeuralNetwork.OptimizationAlgorithm
Optimization algorithm to use
|
Modifier and Type | Method and Description |
---|---|
NeuralNetwork |
clone() |
double |
dropOut() |
void |
epochDone(int epoch)
Event listener for each iteration
|
double |
fanIn() |
AdaGrad |
getAdaGrad() |
org.apache.commons.math3.distribution.RealDistribution |
getDist() |
NeuralNetworkGradient |
getGradient(Object[] params) |
List<NeuralNetworkGradientListener> |
getGradientListeners() |
org.jblas.DoubleMatrix |
gethBias() |
AdaGrad |
gethBiasAdaGrad() |
org.jblas.DoubleMatrix |
getInput() |
double |
getL2() |
NeuralNetwork.LossFunction |
getLossFunction() |
double |
getMomentum() |
int |
getnHidden() |
int |
getnVisible() |
NeuralNetwork.OptimizationAlgorithm |
getOptimizationAlgorithm() |
double |
getReConstructionCrossEntropy()
Error on reconstruction
|
int |
getRenderEpochs() |
org.apache.commons.math3.random.RandomGenerator |
getRng() |
double |
getSparsity() |
org.jblas.DoubleMatrix |
getvBias() |
AdaGrad |
getVBiasAdaGrad() |
org.jblas.DoubleMatrix |
getW() |
org.jblas.DoubleMatrix |
hBiasMean() |
double |
l2RegularizedCoefficient() |
void |
merge(NeuralNetwork network,
int batchSize)
Performs a network merge in the form of
a += b - a / n
where a is a matrix here
b is a matrix on the incoming network
and n is the batch size
|
double |
negativeLogLikelihood() |
boolean |
normalizeByInputRows() |
void |
resetAdaGrad(double lr) |
Pair<org.jblas.DoubleMatrix,org.jblas.DoubleMatrix> |
sampleHiddenGivenVisible(org.jblas.DoubleMatrix v)
Sample hidden mean and sample
given visible
|
Pair<org.jblas.DoubleMatrix,org.jblas.DoubleMatrix> |
sampleVisibleGivenHidden(org.jblas.DoubleMatrix h)
Sample visible mean and sample
given hidden
|
void |
setAdaGrad(AdaGrad adaGrad) |
void |
setDist(org.apache.commons.math3.distribution.RealDistribution dist) |
void |
setDropOut(double dropOut) |
void |
setFanIn(double fanIn) |
void |
setGradientListeners(List<NeuralNetworkGradientListener> gradientListeners) |
void |
sethBias(org.jblas.DoubleMatrix hBias) |
void |
setHbiasAdaGrad(AdaGrad adaGrad) |
void |
setInput(org.jblas.DoubleMatrix input) |
void |
setL2(double l2) |
void |
setLossFunction(NeuralNetwork.LossFunction lossFunction) |
void |
setMomentum(double momentum) |
void |
setnHidden(int nHidden) |
void |
setnVisible(int nVisible) |
void |
setOptimizationAlgorithm(NeuralNetwork.OptimizationAlgorithm optimizationAlgorithm) |
void |
setRenderEpochs(int renderEpochs) |
void |
setRng(org.apache.commons.math3.random.RandomGenerator rng) |
void |
setSparsity(double sparsity) |
void |
setvBias(org.jblas.DoubleMatrix vBias) |
void |
setVBiasAdaGrad(AdaGrad adaGrad) |
void |
setW(org.jblas.DoubleMatrix w) |
double |
squaredLoss() |
void |
train(org.jblas.DoubleMatrix input,
double lr,
Object[] params)
Run one iteration
|
void |
trainTillConvergence(org.jblas.DoubleMatrix input,
double lr,
Object[] params)
Trains via an optimization algorithm such as SGD or Conjugate Gradient
|
NeuralNetwork |
transpose() |
NeuralNetwork.LossFunction getLossFunction()
void setLossFunction(NeuralNetwork.LossFunction lossFunction)
NeuralNetwork.OptimizationAlgorithm getOptimizationAlgorithm()
void setOptimizationAlgorithm(NeuralNetwork.OptimizationAlgorithm optimizationAlgorithm)
boolean normalizeByInputRows()
int getnVisible()
void setnVisible(int nVisible)
int getnHidden()
void setnHidden(int nHidden)
org.jblas.DoubleMatrix getW()
void setW(org.jblas.DoubleMatrix w)
org.jblas.DoubleMatrix gethBias()
void sethBias(org.jblas.DoubleMatrix hBias)
org.jblas.DoubleMatrix getvBias()
void setvBias(org.jblas.DoubleMatrix vBias)
org.apache.commons.math3.random.RandomGenerator getRng()
void setRng(org.apache.commons.math3.random.RandomGenerator rng)
org.jblas.DoubleMatrix getInput()
void setInput(org.jblas.DoubleMatrix input)
double squaredLoss()
double negativeLogLikelihood()
double getSparsity()
void setSparsity(double sparsity)
void setDist(org.apache.commons.math3.distribution.RealDistribution dist)
org.apache.commons.math3.distribution.RealDistribution getDist()
List<NeuralNetworkGradientListener> getGradientListeners()
void setGradientListeners(List<NeuralNetworkGradientListener> gradientListeners)
org.jblas.DoubleMatrix hBiasMean()
AdaGrad getAdaGrad()
void setAdaGrad(AdaGrad adaGrad)
AdaGrad gethBiasAdaGrad()
void setHbiasAdaGrad(AdaGrad adaGrad)
AdaGrad getVBiasAdaGrad()
void setVBiasAdaGrad(AdaGrad adaGrad)
NeuralNetworkGradient getGradient(Object[] params)
double getL2()
void setL2(double l2)
double getMomentum()
void setMomentum(double momentum)
void setRenderEpochs(int renderEpochs)
int getRenderEpochs()
NeuralNetwork transpose()
NeuralNetwork clone()
double fanIn()
void setFanIn(double fanIn)
Pair<org.jblas.DoubleMatrix,org.jblas.DoubleMatrix> sampleHiddenGivenVisible(org.jblas.DoubleMatrix v)
v
- the visible input
void setDropOut(double dropOut)
double dropOut()
Pair<org.jblas.DoubleMatrix,org.jblas.DoubleMatrix> sampleVisibleGivenHidden(org.jblas.DoubleMatrix h)
h
- the hidden input
void resetAdaGrad(double lr)
void epochDone(int epoch)
NeuralNetEpochListener
epochDone
in interface NeuralNetEpochListener
double l2RegularizedCoefficient()
double getReConstructionCrossEntropy()
void train(org.jblas.DoubleMatrix input, double lr, Object[] params)
input
- the input to train on
lr
- the learning rate to use
params
- the extra params for the neural network (k, corruption level, max epochs, ...)
void trainTillConvergence(org.jblas.DoubleMatrix input, double lr, Object[] params)
input
- the input to train on
lr
- the learning rate to use
params
- the params (k, corruption level, max epochs, ...)
void merge(NeuralNetwork network, int batchSize)
network
- the network to merge with
batchSize
- the batch size (number of training examples) to average by

Copyright © 2014. All Rights Reserved.