Value Members
-
-
final
def
!=(arg0: Any): Boolean
-
final
def
##(): Int
-
-
final
def
==(arg0: Any): Boolean
-
def
adjust(newX: T, newGrad: T, newVal: Double): (Double, T)
-
-
final
def
asInstanceOf[T0]: T0
-
def
calculateObjective(f: DiffFunction[T], x: T, history: History): (Double, T)
-
def
chooseDescentDirection(state: State, fn: DiffFunction[T]): T
-
def
clone(): AnyRef
-
def
determineStepSize(state: State, f: DiffFunction[T], dir: T): Double
-
-
-
def
finalize(): Unit
-
final
def
getClass(): Class[_]
-
def
hashCode(): Int
-
def
infiniteIterations(f: DiffFunction[T], state: State): Iterator[State]
-
-
def
initialState(f: DiffFunction[T], init: T): State
-
final
def
isInstanceOf[T0]: Boolean
-
def
iterations(f: DiffFunction[T], init: T): Iterator[State]
-
-
def
minimize(f: DiffFunction[T], init: T): T
-
def
minimizeAndReturnState(f: DiffFunction[T], init: T): State
-
-
final
def
notify(): Unit
-
final
def
notifyAll(): Unit
-
final
def
synchronized[T0](arg0: ⇒ T0): T0
-
def
takeStep(state: State, dir: T, stepSize: Double): T
-
def
toString(): String
-
def
updateHistory(newX: T, newGrad: T, newVal: Double, f: DiffFunction[T], oldState: State): History
-
final
def
wait(): Unit
-
final
def
wait(arg0: Long, arg1: Int): Unit
-
final
def
wait(arg0: Long): Unit
Implements the Orthant-Wise Limited-memory Quasi-Newton (OWL-QN) method, a variant of L-BFGS that handles L1 regularization.
Reference: Andrew and Gao (2007), "Scalable Training of L1-Regularized Log-Linear Models".