t

com.johnsnowlabs.ml.tensorflow

TensorflowForClassification

trait TensorflowForClassification extends AnyRef

Ordering
  1. Alphabetic
  2. By Inheritance
Inherited
  1. TensorflowForClassification
  2. AnyRef
  3. Any
  1. Hide All
  2. Show All
Visibility
  1. Public
  2. All

Abstract Value Members

  1. abstract def findIndexedToken(tokenizedSentences: Seq[TokenizedSentence], sentence: (WordpieceTokenizedSentence, Int), tokenPiece: TokenPiece): Option[IndexedToken]
  2. abstract val sentenceEndTokenId: Int
    Attributes
    protected
  3. abstract val sentencePadTokenId: Int
    Attributes
    protected
  4. abstract val sentenceStartTokenId: Int
    Attributes
    protected
  5. abstract def tag(batch: Seq[Array[Int]]): Seq[Array[Array[Float]]]
  6. abstract def tagSequence(batch: Seq[Array[Int]], activation: String): Array[Array[Float]]
  7. abstract def tagSpan(batch: Seq[Array[Int]]): (Array[Array[Float]], Array[Array[Float]])
  8. abstract def tokenizeDocument(docs: Seq[Annotation], maxSeqLength: Int, caseSensitive: Boolean): Seq[WordpieceTokenizedSentence]
  9. abstract def tokenizeWithAlignment(sentences: Seq[TokenizedSentence], maxSeqLength: Int, caseSensitive: Boolean): Seq[WordpieceTokenizedSentence]

Concrete Value Members

  1. final def !=(arg0: Any): Boolean
    Definition Classes
    AnyRef → Any
  2. final def ##(): Int
    Definition Classes
    AnyRef → Any
  3. final def ==(arg0: Any): Boolean
    Definition Classes
    AnyRef → Any
  4. final def asInstanceOf[T0]: T0
    Definition Classes
    Any
  5. def calculateSigmoid(scores: Array[Float]): Array[Float]

    Calculate sigmoid from returned logits

    Calculate sigmoid from returned logits

    scores

    logits output from output layer

  6. def calculateSoftmax(scores: Array[Float]): Array[Float]

    Calculate softmax from returned logits

    Calculate softmax from returned logits

    scores

    logits output from output layer

  7. def clone(): AnyRef
    Attributes
    protected[lang]
    Definition Classes
    AnyRef
    Annotations
    @throws( ... ) @native()
  8. def constructAnnotationForSequenceClassifier(sentence: Sentence, label: String, meta: Array[(String, String)]): Annotation
  9. def constructMetaForSequenceClassifier(tags: Map[String, Int], scores: Array[Float]): Array[(String, String)]
  10. def encode(sentences: Seq[(WordpieceTokenizedSentence, Int)], maxSequenceLength: Int): Seq[Array[Int]]

    Encode the input sequence to indexes IDs adding padding where necessary

  11. def encodeSequence(seq1: Seq[WordpieceTokenizedSentence], seq2: Seq[WordpieceTokenizedSentence], maxSequenceLength: Int): Seq[Array[Int]]
  12. final def eq(arg0: AnyRef): Boolean
    Definition Classes
    AnyRef
  13. def equals(arg0: Any): Boolean
    Definition Classes
    AnyRef → Any
  14. def finalize(): Unit
    Attributes
    protected[lang]
    Definition Classes
    AnyRef
    Annotations
    @throws( classOf[java.lang.Throwable] )
  15. final def getClass(): Class[_]
    Definition Classes
    AnyRef → Any
    Annotations
    @native()
  16. def hashCode(): Int
    Definition Classes
    AnyRef → Any
    Annotations
    @native()
  17. final def isInstanceOf[T0]: Boolean
    Definition Classes
    Any
  18. final def ne(arg0: AnyRef): Boolean
    Definition Classes
    AnyRef
  19. final def notify(): Unit
    Definition Classes
    AnyRef
    Annotations
    @native()
  20. final def notifyAll(): Unit
    Definition Classes
    AnyRef
    Annotations
    @native()
  21. def predict(tokenizedSentences: Seq[TokenizedSentence], batchSize: Int, maxSentenceLength: Int, caseSensitive: Boolean, tags: Map[String, Int]): Seq[Annotation]
  22. def predictSequence(tokenizedSentences: Seq[TokenizedSentence], sentences: Seq[Sentence], batchSize: Int, maxSentenceLength: Int, caseSensitive: Boolean, coalesceSentences: Boolean = false, tags: Map[String, Int], activation: String = ActivationFunction.softmax): Seq[Annotation]
  23. def predictSpan(documents: Seq[Annotation], maxSentenceLength: Int, caseSensitive: Boolean, mergeTokenStrategy: String = MergeTokenStrategy.vocab): Seq[Annotation]
  24. def scoresToLabelForSequenceClassifier(tags: Map[String, Int], scores: Array[Float]): String
  25. final def synchronized[T0](arg0: ⇒ T0): T0
    Definition Classes
    AnyRef
  26. def toString(): String
    Definition Classes
    AnyRef → Any
  27. final def wait(): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws( ... )
  28. final def wait(arg0: Long, arg1: Int): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws( ... )
  29. final def wait(arg0: Long): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws( ... ) @native()
  30. def wordAndSpanLevelAlignmentWithTokenizer(tokenLogits: Array[Array[Float]], tokenizedSentences: Seq[TokenizedSentence], sentence: (WordpieceTokenizedSentence, Int), tags: Map[String, Int]): Seq[Annotation]

    Word-level and span-level alignment with Tokenizer https://github.com/google-research/bert#tokenization

    Word-level and span-level alignment with Tokenizer https://github.com/google-research/bert#tokenization

    ### Input orig_tokens = ["John", "Johanson", "'s", "house"] labels = ["NNP", "NNP", "POS", "NN"]

    # bert_tokens == ["[CLS]", "john", "johan", "##son", "'", "s", "house", "[SEP]"] # orig_to_tok_map == [1, 2, 4, 6]

Inherited from AnyRef

Inherited from Any

Ungrouped